lidaofu 2024-06-26 14:53:10 +08:00
commit c24817671f
19 changed files with 2923 additions and 371 deletions

@ -1 +1 @@
Subproject commit 5144e2aa521df6d473308bfb31172054772a634f
Subproject commit 79c10fe4773819d99efe905bb2fecf3446f3b36c


@ -452,6 +452,15 @@ elseif(NOT ANDROID OR IOS)
update_cached_list(MK_LINK_LIBRARIES pthread)
endif()
if(ENABLE_VIDEOSTACK)
if(ENABLE_FFMPEG AND ENABLE_X264)
message(STATUS "ENABLE_VIDEOSTACK defined")
update_cached_list(MK_COMPILE_DEFINITIONS ENABLE_VIDEOSTACK)
else()
message(WARNING "ENABLE_VIDEOSTACK requires ENABLE_FFMPEG and ENABLE_X264")
endif ()
endif ()
# ----------------------------------------------------------------------------
# Solution folders:
# ----------------------------------------------------------------------------


@ -40,7 +40,7 @@ bool G711RtpEncoder::inputFrame(const Frame::Ptr &frame) {
auto remain_size = len;
size_t max_size = 160 * _channels * _pkt_dur_ms / 20; // 160 bytes per 20 ms
size_t n = 0;
bool mark = true;
bool mark = false;
while (remain_size >= max_size) {
assert(remain_size >= max_size);
const size_t rtp_size = max_size;
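A quick sanity check on the size arithmetic above (illustrative values, not part of the patch): G.711 runs at 8 kHz with one byte per sample, so a single channel yields 160 bytes per 20 ms, and for continuous audio the RTP marker bit stays cleared (RFC 3551 only sets it on the first packet after silence), which is what the change from mark = true to mark = false reflects.

// Illustrative sketch only: expected payload bytes per RTP packet.
size_t g711BytesPerPacket(size_t channels, size_t pkt_dur_ms) {
    return 160 * channels * pkt_dur_ms / 20; // 1 ch, 20 ms -> 160; 2 ch, 40 ms -> 640
}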


@ -235,13 +235,17 @@ void H264Track::setExtraData(const uint8_t *data, size_t bytes) {
}
bool H264Track::update() {
_config_frames = std::vector<Frame::Ptr>{
createConfigFrame<H264Frame>(_sps),
createConfigFrame<H264Frame>(_pps)
};
return getAVCInfo(_sps, _width, _height, _fps);
}
std::vector<Frame::Ptr> H264Track::getConfigFrames() const {
if (!ready()) {
return {};
}
return { createConfigFrame<H264Frame>(_sps, 0, getIndex()),
createConfigFrame<H264Frame>(_pps, 0, getIndex()) };
}
Track::Ptr H264Track::clone() const {
return std::make_shared<H264Track>(*this);
}
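A minimal usage sketch (an assumed caller, not code from this commit): once the track is ready, getConfigFrames() builds fresh SPS/PPS frames that a consumer can feed ahead of a key frame, which is how WebRtcPlayer::sendConfigFrames later in this diff uses them.

// Hypothetical consumer; 'sink' stands for any Frame-accepting object.
if (track->ready()) {
    for (const auto &frame : track->getConfigFrames()) {
        sink->inputFrame(frame); // prepend SPS/PPS so late joiners can decode the next key frame
    }
}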


@ -115,7 +115,7 @@ public:
toolkit::Buffer::Ptr getExtraData() const override;
void setExtraData(const uint8_t *data, size_t size) override;
bool update() override;
std::vector<Frame::Ptr> getConfigFrames() const override { return _config_frames; }
std::vector<Frame::Ptr> getConfigFrames() const override;
private:
Sdp::Ptr getSdp(uint8_t payload_type) const override;
@ -130,12 +130,11 @@ private:
float _fps = 0;
std::string _sps;
std::string _pps;
std::vector<Frame::Ptr> _config_frames;
};
template <typename FrameType>
Frame::Ptr createConfigFrame(const std::string &data, uint64_t dts = 0, int index = 0) {
auto frame = FrameType::create();
Frame::Ptr createConfigFrame(const std::string &data, uint64_t dts, int index) {
auto frame = FrameImp::create<FrameType>();
frame->_prefix_size = 4;
frame->_buffer.assign("\x00\x00\x00\x01", 4);
frame->_buffer.append(data);


@ -182,14 +182,18 @@ void H265Track::setExtraData(const uint8_t *data, size_t bytes) {
}
bool H265Track::update() {
_config_frames = std::vector<Frame::Ptr>{
createConfigFrame<H265Frame>(_vps),
createConfigFrame<H265Frame>(_sps),
createConfigFrame<H265Frame>(_pps)
};
return getHEVCInfo(_vps, _sps, _width, _height, _fps);
}
std::vector<Frame::Ptr> H265Track::getConfigFrames() const {
if (!ready()) {
return {};
}
return { createConfigFrame<H265Frame>(_vps, 0, getIndex()),
createConfigFrame<H265Frame>(_sps, 0, getIndex()),
createConfigFrame<H265Frame>(_pps, 0, getIndex()) };
}
Track::Ptr H265Track::clone() const {
return std::make_shared<H265Track>(*this);
}
@ -199,13 +203,13 @@ void H265Track::insertConfigFrame(const Frame::Ptr &frame) {
return;
}
if (!_vps.empty()) {
VideoTrack::inputFrame(createConfigFrame<H265Frame>(_vps));
VideoTrack::inputFrame(createConfigFrame<H265Frame>(_vps, frame->dts(), frame->getIndex()));
}
if (!_sps.empty()) {
VideoTrack::inputFrame(createConfigFrame<H265Frame>(_sps));
VideoTrack::inputFrame(createConfigFrame<H265Frame>(_sps, frame->dts(), frame->getIndex()));
}
if (!_pps.empty()) {
VideoTrack::inputFrame(createConfigFrame<H265Frame>(_pps));
VideoTrack::inputFrame(createConfigFrame<H265Frame>(_pps, frame->dts(), frame->getIndex()));
}
}


@ -142,7 +142,7 @@ public:
toolkit::Buffer::Ptr getExtraData() const override;
void setExtraData(const uint8_t *data, size_t size) override;
bool update() override;
std::vector<Frame::Ptr> getConfigFrames() const override { return _config_frames; }
std::vector<Frame::Ptr> getConfigFrames() const override;
private:
Sdp::Ptr getSdp(uint8_t payload_type) const override;
@ -158,7 +158,6 @@ private:
std::string _vps;
std::string _sps;
std::string _pps;
std::vector<Frame::Ptr> _config_frames;
};
}//namespace mediakit


@ -18,23 +18,15 @@
// ITU-R BT.709
#define RGB_TO_Y(R, G, B) (((47 * (R) + 157 * (G) + 16 * (B) + 128) >> 8) + 16)
#define RGB_TO_U(R, G, B) (((-26 * (R)-87 * (G) + 112 * (B) + 128) >> 8) + 128)
#define RGB_TO_V(R, G, B) (((112 * (R)-102 * (G)-10 * (B) + 128) >> 8) + 128)
#define RGB_TO_U(R, G, B) (((-26 * (R) - 87 * (G) + 112 * (B) + 128) >> 8) + 128)
#define RGB_TO_V(R, G, B) (((112 * (R) - 102 * (G) - 10 * (B) + 128) >> 8) + 128)
INSTANCE_IMP(VideoStackManager)
Param::~Param()
{
VideoStackManager::Instance().unrefChannel(
id, width, height, pixfmt);
}
Param::~Param() { VideoStackManager::Instance().unrefChannel(id, width, height, pixfmt); }
Channel::Channel(const std::string& id, int width, int height, AVPixelFormat pixfmt)
: _id(id)
, _width(width)
, _height(height)
, _pixfmt(pixfmt)
{
: _id(id), _width(width), _height(height), _pixfmt(pixfmt) {
_tmp = std::make_shared<mediakit::FFmpegFrame>();
_tmp->get()->width = _width;
@ -53,88 +45,72 @@ Channel::Channel(const std::string& id, int width, int height, AVPixelFormat pix
_tmp = _sws->inputFrame(frame);
}
void Channel::addParam(const std::weak_ptr<Param>& p)
{
void Channel::addParam(const std::weak_ptr<Param>& p) {
std::lock_guard<std::recursive_mutex> lock(_mx);
_params.push_back(p);
}
void Channel::onFrame(const mediakit::FFmpegFrame::Ptr& frame)
{
void Channel::onFrame(const mediakit::FFmpegFrame::Ptr& frame) {
std::weak_ptr<Channel> weakSelf = shared_from_this();
_poller = _poller ? _poller : toolkit::WorkThreadPool::Instance().getPoller();
_poller->async([weakSelf, frame]() {
auto self = weakSelf.lock();
if (!self) {
return;
}
if (!self) { return; }
self->_tmp = self->_sws->inputFrame(frame);
self->forEachParam([self](const Param::Ptr& p) { self->fillBuffer(p); });
});
}
void Channel::forEachParam(const std::function<void(const Param::Ptr&)>& func)
{
void Channel::forEachParam(const std::function<void(const Param::Ptr&)>& func) {
for (auto& wp : _params) {
if (auto sp = wp.lock()) {
func(sp);
}
if (auto sp = wp.lock()) { func(sp); }
}
}
void Channel::fillBuffer(const Param::Ptr& p)
{
if (auto buf = p->weak_buf.lock()) {
copyData(buf, p);
}
void Channel::fillBuffer(const Param::Ptr& p) {
if (auto buf = p->weak_buf.lock()) { copyData(buf, p); }
}
void Channel::copyData(const mediakit::FFmpegFrame::Ptr& buf, const Param::Ptr& p)
{
void Channel::copyData(const mediakit::FFmpegFrame::Ptr& buf, const Param::Ptr& p) {
switch (p->pixfmt) {
case AV_PIX_FMT_YUV420P: {
for (int i = 0; i < p->height; i++) {
memcpy(buf->get()->data[0] + buf->get()->linesize[0] * (i + p->posY) + p->posX,
_tmp->get()->data[0] + _tmp->get()->linesize[0] * i,
_tmp->get()->width);
_tmp->get()->data[0] + _tmp->get()->linesize[0] * i, _tmp->get()->width);
}
//Ensure the last row of UV data is also copied correctly when height is odd
// Ensure the last row of UV data is also copied correctly when height is odd
for (int i = 0; i < (p->height + 1) / 2; i++) {
// U plane
memcpy(buf->get()->data[1] + buf->get()->linesize[1] * (i + p->posY / 2) + p->posX / 2,
_tmp->get()->data[1] + _tmp->get()->linesize[1] * i,
_tmp->get()->width / 2);
memcpy(buf->get()->data[1] + buf->get()->linesize[1] * (i + p->posY / 2) +
p->posX / 2,
_tmp->get()->data[1] + _tmp->get()->linesize[1] * i, _tmp->get()->width / 2);
// V plane
memcpy(buf->get()->data[2] + buf->get()->linesize[2] * (i + p->posY / 2) + p->posX / 2,
_tmp->get()->data[2] + _tmp->get()->linesize[2] * i,
_tmp->get()->width / 2);
memcpy(buf->get()->data[2] + buf->get()->linesize[2] * (i + p->posY / 2) +
p->posX / 2,
_tmp->get()->data[2] + _tmp->get()->linesize[2] * i, _tmp->get()->width / 2);
}
break;
}
case AV_PIX_FMT_NV12: {
//TODO: not yet implemented
// TODO: not yet implemented
break;
}
default:
WarnL << "No support pixformat: " << av_get_pix_fmt_name(p->pixfmt);
break;
default: WarnL << "No support pixformat: " << av_get_pix_fmt_name(p->pixfmt); break;
}
}
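A worked check of the odd-height handling above (numbers are illustrative): with p->height = 9, the chroma loop runs (9 + 1) / 2 = 5 times, so the last UV row is still copied, whereas a plain 9 / 2 = 4 would drop it.

// Illustrative only: chroma rows copied for an odd luma height in YUV420.
int luma_rows = 9;
int chroma_rows = (luma_rows + 1) / 2; // 5, not 4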
void StackPlayer::addChannel(const std::weak_ptr<Channel>& chn)
{
void StackPlayer::addChannel(const std::weak_ptr<Channel>& chn) {
std::lock_guard<std::recursive_mutex> lock(_mx);
_channels.push_back(chn);
}
void StackPlayer::play()
{
void StackPlayer::play() {
auto url = _url;
//Create the stream puller and decoder objects
// Create the stream puller and decoder objects
_player = std::make_shared<mediakit::MediaPlayer>();
std::weak_ptr<mediakit::MediaPlayer> weakPlayer = _player;
@ -146,13 +122,9 @@ void StackPlayer::play()
_player->setOnPlayResult([weakPlayer, weakSelf, url](const toolkit::SockException& ex) mutable {
TraceL << "StackPlayer: " << url << " OnPlayResult: " << ex.what();
auto strongPlayer = weakPlayer.lock();
if (!strongPlayer) {
return;
}
if (!strongPlayer) { return; }
auto self = weakSelf.lock();
if (!self) {
return;
}
if (!self) { return; }
if (!ex) {
// Cancel the timer
@ -164,19 +136,18 @@ void StackPlayer::play()
self->rePlay(url);
}
auto videoTrack = std::dynamic_pointer_cast<mediakit::VideoTrack>(strongPlayer->getTrack(mediakit::TrackVideo, false));
//auto audioTrack = std::dynamic_pointer_cast<mediakit::AudioTrack>(strongPlayer->getTrack(mediakit::TrackAudio, false));
auto videoTrack = std::dynamic_pointer_cast<mediakit::VideoTrack>(
strongPlayer->getTrack(mediakit::TrackVideo, false));
// auto audioTrack = std::dynamic_pointer_cast<mediakit::AudioTrack>(strongPlayer->getTrack(mediakit::TrackAudio, false));
if (videoTrack) {
//TODO: add logic to choose between GPU and CPU decoding
//auto decoder = std::make_shared<FFmpegDecoder>(videoTrack, 1, std::vector<std::string>{ "hevc_cuvid", "h264_cuvid"});
auto decoder = std::make_shared<mediakit::FFmpegDecoder>(videoTrack, 0, std::vector<std::string> { "h264", "hevc" });
// TODO: add logic to choose between GPU and CPU decoding
auto decoder = std::make_shared<mediakit::FFmpegDecoder>(
videoTrack, 0, std::vector<std::string>{"h264", "hevc"});
decoder->setOnDecode([weakSelf](const mediakit::FFmpegFrame::Ptr& frame) mutable {
auto self = weakSelf.lock();
if (!self) {
return;
}
if (!self) { return; }
self->onFrame(frame);
});
@ -190,14 +161,10 @@ void StackPlayer::play()
_player->setOnShutdown([weakPlayer, url, weakSelf](const toolkit::SockException& ex) {
TraceL << "StackPlayer: " << url << " OnShutdown: " << ex.what();
auto strongPlayer = weakPlayer.lock();
if (!strongPlayer) {
return;
}
if (!strongPlayer) { return; }
auto self = weakSelf.lock();
if (!self) {
return;
}
if (!self) { return; }
self->onDisconnect();
@ -207,18 +174,14 @@ void StackPlayer::play()
_player->play(url);
}
void StackPlayer::onFrame(const mediakit::FFmpegFrame::Ptr& frame)
{
void StackPlayer::onFrame(const mediakit::FFmpegFrame::Ptr& frame) {
std::lock_guard<std::recursive_mutex> lock(_mx);
for (auto& weak_chn : _channels) {
if (auto chn = weak_chn.lock()) {
chn->onFrame(frame);
}
if (auto chn = weak_chn.lock()) { chn->onFrame(frame); }
}
}
void StackPlayer::onDisconnect()
{
void StackPlayer::onDisconnect() {
std::lock_guard<std::recursive_mutex> lock(_mx);
for (auto& weak_chn : _channels) {
if (auto chn = weak_chn.lock()) {
@ -228,31 +191,22 @@ void StackPlayer::onDisconnect()
}
}
void StackPlayer::rePlay(const std::string& url)
{
void StackPlayer::rePlay(const std::string& url) {
_failedCount++;
auto delay = MAX(2 * 1000, MIN(_failedCount * 3 * 1000, 60 * 1000)); //stepped back-off retry interval
auto delay = MAX(2 * 1000, MIN(_failedCount * 3 * 1000, 60 * 1000));// stepped back-off retry interval
std::weak_ptr<StackPlayer> weakSelf = shared_from_this();
_timer = std::make_shared<toolkit::Timer>(
delay / 1000.0f, [weakSelf, url]() {
_timer = std::make_shared<toolkit::Timer>(delay / 1000.0f, [weakSelf, url]() {
auto self = weakSelf.lock();
if (!self) {
}
if (!self) {}
WarnL << "replay [" << self->_failedCount << "]:" << url;
self->_player->play(url);
return false;
},
nullptr);
}, nullptr);
}
VideoStack::VideoStack(const std::string& id, int width, int height, AVPixelFormat pixfmt, float fps, int bitRate)
: _id(id)
, _width(width)
, _height(height)
, _pixfmt(pixfmt)
, _fps(fps)
, _bitRate(bitRate)
{
VideoStack::VideoStack(const std::string& id, int width, int height, AVPixelFormat pixfmt,
float fps, int bitRate)
: _id(id), _width(width), _height(height), _pixfmt(pixfmt), _fps(fps), _bitRate(bitRate) {
_buffer = std::make_shared<mediakit::FFmpegFrame>();
@ -262,7 +216,8 @@ VideoStack::VideoStack(const std::string& id, int width, int height, AVPixelForm
av_frame_get_buffer(_buffer->get(), 32);
_dev = std::make_shared<mediakit::DevChannel>(mediakit::MediaTuple { DEFAULT_VHOST, "live", _id });
_dev = std::make_shared<mediakit::DevChannel>(
mediakit::MediaTuple{DEFAULT_VHOST, "live", _id, ""});
mediakit::VideoInfo info;
info.codecId = mediakit::CodecH264;
@ -272,34 +227,28 @@ VideoStack::VideoStack(const std::string& id, int width, int height, AVPixelForm
info.iBitRate = _bitRate;
_dev->initVideo(info);
//dev->initAudio(); //TODO: audio
// dev->initAudio(); //TODO: audio
_dev->addTrackCompleted();
_isExit = false;
}
VideoStack::~VideoStack()
{
VideoStack::~VideoStack() {
_isExit = true;
if (_thread.joinable()) {
_thread.join();
}
if (_thread.joinable()) { _thread.join(); }
}
void VideoStack::setParam(const Params& params)
{
void VideoStack::setParam(const Params& params) {
if (_params) {
for (auto& p : (*_params)) {
if (!p)
continue;
if (!p) continue;
p->weak_buf.reset();
}
}
initBgColor();
for (auto& p : (*params)) {
if (!p)
continue;
if (!p) continue;
p->weak_buf = _buffer;
if (auto chn = p->weak_chn.lock()) {
chn->addParam(p);
@ -309,14 +258,14 @@ void VideoStack::setParam(const Params& params)
_params = params;
}
void VideoStack::start()
{
void VideoStack::start() {
_thread = std::thread([&]() {
uint64_t pts = 0;
int frameInterval = 1000 / _fps;
auto lastEncTP = std::chrono::steady_clock::now();
while (!_isExit) {
if (std::chrono::steady_clock::now() - lastEncTP > std::chrono::milliseconds(frameInterval)) {
if (std::chrono::steady_clock::now() - lastEncTP >
std::chrono::milliseconds(frameInterval)) {
lastEncTP = std::chrono::steady_clock::now();
_dev->inputYUV((char**)_buffer->get()->data, _buffer->get()->linesize, pts);
@ -326,9 +275,8 @@ void VideoStack::start()
});
}
void VideoStack::initBgColor()
{
//Fill the background color
void VideoStack::initBgColor() {
// Fill the background color
auto R = 20;
auto G = 20;
auto B = 20;
@ -342,27 +290,19 @@ void VideoStack::initBgColor()
memset(_buffer->get()->data[2], V, _buffer->get()->linesize[2] * _height / 2);
}
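Plugging the background value into the BT.709 macros defined at the top of this file gives a quick check of what initBgColor writes (arithmetic only, no new behavior): R = G = B = 20 resolves to Y = 33 and U = V = 128, i.e. a neutral dark gray.

// Worked example (illustrative): the dark-gray background in YUV terms.
int R = 20, G = 20, B = 20;
int Y = RGB_TO_Y(R, G, B); // ((47*20 + 157*20 + 16*20 + 128) >> 8) + 16 = 33
int U = RGB_TO_U(R, G, B); // 128
int V = RGB_TO_V(R, G, B); // 128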
Channel::Ptr VideoStackManager::getChannel(const std::string& id,
int width,
int height,
AVPixelFormat pixfmt)
{
Channel::Ptr VideoStackManager::getChannel(const std::string& id, int width, int height,
AVPixelFormat pixfmt) {
std::lock_guard<std::recursive_mutex> lock(_mx);
auto key = id + std::to_string(width) + std::to_string(height) + std::to_string(pixfmt);
auto it = _channelMap.find(key);
if (it != _channelMap.end()) {
return it->second->acquire();
}
if (it != _channelMap.end()) { return it->second->acquire(); }
return createChannel(id, width, height, pixfmt);
}
void VideoStackManager::unrefChannel(const std::string& id,
int width,
int height,
AVPixelFormat pixfmt)
{
void VideoStackManager::unrefChannel(const std::string& id, int width, int height,
AVPixelFormat pixfmt) {
std::lock_guard<std::recursive_mutex> lock(_mx);
auto key = id + std::to_string(width) + std::to_string(height) + std::to_string(pixfmt);
@ -377,8 +317,7 @@ void VideoStackManager::unrefChannel(const std::string& id,
}
}
int VideoStackManager::startVideoStack(const Json::Value& json)
{
int VideoStackManager::startVideoStack(const Json::Value& json) {
std::string id;
int width, height;
@ -392,8 +331,7 @@ int VideoStackManager::startVideoStack(const Json::Value& json)
auto stack = std::make_shared<VideoStack>(id, width, height);
for (auto& p : (*params)) {
if (!p)
continue;
if (!p) continue;
p->weak_chn = getChannel(p->id, p->width, p->height, p->pixfmt);
}
@ -405,13 +343,13 @@ int VideoStackManager::startVideoStack(const Json::Value& json)
return 0;
}
int VideoStackManager::resetVideoStack(const Json::Value& json)
{
int VideoStackManager::resetVideoStack(const Json::Value& json) {
std::string id;
int width, height;
auto params = parseParams(json, id, width, height);
if (!params) {
ErrorL << "Videostack parse params failed!";
return -1;
}
@ -419,15 +357,12 @@ int VideoStackManager::resetVideoStack(const Json::Value& json)
{
std::lock_guard<std::recursive_mutex> lock(_mx);
auto it = _stackMap.find(id);
if (it == _stackMap.end()) {
return -2;
}
if (it == _stackMap.end()) { return -2; }
stack = it->second;
}
for (auto& p : (*params)) {
if (!p)
continue;
if (!p) continue;
p->weak_chn = getChannel(p->id, p->width, p->height, p->pixfmt);
}
@ -435,8 +370,7 @@ int VideoStackManager::resetVideoStack(const Json::Value& json)
return 0;
}
int VideoStackManager::stopVideoStack(const std::string& id)
{
int VideoStackManager::stopVideoStack(const std::string& id) {
std::lock_guard<std::recursive_mutex> lock(_mx);
auto it = _stackMap.find(id);
if (it != _stackMap.end()) {
@ -447,28 +381,28 @@ int VideoStackManager::stopVideoStack(const std::string& id)
return -1;
}
mediakit::FFmpegFrame::Ptr VideoStackManager::getBgImg()
{
return _bgImg;
mediakit::FFmpegFrame::Ptr VideoStackManager::getBgImg() { return _bgImg; }
template<typename T> T getJsonValue(const Json::Value& json, const std::string& key) {
if (!json.isMember(key)) {
throw Json::LogicError("VideoStack parseParams missing required field: " + key);
}
return json[key].as<T>();
}
Params VideoStackManager::parseParams(const Json::Value& json,
std::string& id,
int& width,
int& height)
{
try {
id = json["id"].asString();
Params VideoStackManager::parseParams(const Json::Value& json, std::string& id, int& width,
int& height) {
width = json["width"].asInt();
height = json["height"].asInt();
id = getJsonValue<std::string>(json, "id");
width = getJsonValue<int>(json, "width");
height = getJsonValue<int>(json, "height");
int rows = getJsonValue<int>(json, "row");// number of rows
int cols = getJsonValue<int>(json, "col");// number of columns
int rows = json["row"].asInt(); //number of stacked rows
int cols = json["col"].asInt(); //number of stacked columns
float gapv = json["gapv"].asFloat(); //vertical gap
float gaph = json["gaph"].asFloat(); //horizontal gap
float gapv = json["gapv"].asFloat();// vertical gap
float gaph = json["gaph"].asFloat();// horizontal gap
//size of a single gap in pixels
// size of a single gap in pixels
int gaphPix = static_cast<int>(round(width * gaph));
int gapvPix = static_cast<int>(round(height * gapv));
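As a quick check of the gap arithmetic above (values illustrative, not from the patch): a 1920x1080 canvas with gaph = gapv = 0.01 gives gaphPix = round(1920 * 0.01) = 19 px horizontally and gapvPix = round(1080 * 0.01) = 11 px vertically; the per-cell grid sizes computed in the part of this function not shown here then have to account for these gaps.

// Illustrative numbers only:
int gaphPix = static_cast<int>(round(1920 * 0.01)); // 19
int gapvPix = static_cast<int>(round(1080 * 0.01)); // 11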
@ -493,31 +427,33 @@ Params VideoStackManager::parseParams(const Json::Value& json,
}
}
//Check whether cells need to be merged (focus view)
if (!json["span"].empty() && json.isMember("span")) {
// Check whether cells need to be merged (focus view)
if (json.isMember("span") && json["span"].isArray() && json["span"].size() > 0) {
for (const auto& subArray : json["span"]) {
if (!subArray.isArray() || subArray.size() != 2) {
throw Json::LogicError("Incorrect 'span' sub-array format in JSON");
}
std::array<int, 4> mergePos;
int index = 0;
unsigned int index = 0;
for (const auto& innerArray : subArray) {
if (!innerArray.isArray() || innerArray.size() != 2) {
throw Json::LogicError("Incorrect 'span' inner-array format in JSON");
}
for (const auto& number : innerArray) {
if (index < mergePos.size()) {
mergePos[index++] = number.asInt();
}
if (index < mergePos.size()) { mergePos[index++] = number.asInt(); }
}
}
for (int i = mergePos[0]; i <= mergePos[2]; i++) {
for (int j = mergePos[1]; j <= mergePos[3]; j++) {
if (i == mergePos[0] && j == mergePos[1]) {
(*params)[i * cols + j]->width = (mergePos[3] - mergePos[1] + 1) * gridWidth + (mergePos[3] - mergePos[1]) * gapvPix;
(*params)[i * cols + j]->height = (mergePos[2] - mergePos[0] + 1) * gridHeight + (mergePos[2] - mergePos[0]) * gaphPix;
(*params)[i * cols + j]->width =
(mergePos[3] - mergePos[1] + 1) * gridWidth +
(mergePos[3] - mergePos[1]) * gapvPix;
(*params)[i * cols + j]->height =
(mergePos[2] - mergePos[0] + 1) * gridHeight +
(mergePos[2] - mergePos[0]) * gaphPix;
} else {
(*params)[i * cols + j] = nullptr;
}
@ -526,14 +462,9 @@ Params VideoStackManager::parseParams(const Json::Value& json,
}
}
return params;
} catch (const std::exception& e) {
ErrorL << "Videostack parse params failed! " << e.what();
return nullptr;
}
}
bool VideoStackManager::loadBgImg(const std::string& path)
{
bool VideoStackManager::loadBgImg(const std::string& path) {
_bgImg = std::make_shared<mediakit::FFmpegFrame>();
_bgImg->get()->width = 1280;
@ -543,21 +474,21 @@ bool VideoStackManager::loadBgImg(const std::string& path)
av_frame_get_buffer(_bgImg->get(), 32);
std::ifstream file(path, std::ios::binary);
if (!file.is_open()) {
return false;
}
if (!file.is_open()) { return false; }
file.read((char*)_bgImg->get()->data[0], _bgImg->get()->linesize[0] * _bgImg->get()->height); // Y
file.read((char*)_bgImg->get()->data[1], _bgImg->get()->linesize[1] * _bgImg->get()->height / 2); // U
file.read((char*)_bgImg->get()->data[2], _bgImg->get()->linesize[2] * _bgImg->get()->height / 2); // V
file.read((char*)_bgImg->get()->data[0],
_bgImg->get()->linesize[0] * _bgImg->get()->height);// Y
file.read((char*)_bgImg->get()->data[1],
_bgImg->get()->linesize[1] * _bgImg->get()->height / 2);// U
file.read((char*)_bgImg->get()->data[2],
_bgImg->get()->linesize[2] * _bgImg->get()->height / 2);// V
return true;
}
Channel::Ptr VideoStackManager::createChannel(const std::string& id,
int width,
int height,
AVPixelFormat pixfmt)
{
void VideoStackManager::clear() { _stackMap.clear(); }
Channel::Ptr VideoStackManager::createChannel(const std::string& id, int width, int height,
AVPixelFormat pixfmt) {
std::lock_guard<std::recursive_mutex> lock(_mx);
StackPlayer::Ptr player;
@ -568,24 +499,24 @@ Channel::Ptr VideoStackManager::createChannel(const std::string& id,
player = createPlayer(id);
}
auto refChn = std::make_shared<RefWrapper<Channel::Ptr>>(std::make_shared<Channel>(id, width, height, pixfmt));
auto refChn = std::make_shared<RefWrapper<Channel::Ptr>>(
std::make_shared<Channel>(id, width, height, pixfmt));
auto chn = refChn->acquire();
player->addChannel(chn);
_channelMap[id + std::to_string(width) + std::to_string(height) + std::to_string(pixfmt)] = refChn;
_channelMap[id + std::to_string(width) + std::to_string(height) + std::to_string(pixfmt)] =
refChn;
return chn;
}
StackPlayer::Ptr VideoStackManager::createPlayer(const std::string& id)
{
StackPlayer::Ptr VideoStackManager::createPlayer(const std::string& id) {
std::lock_guard<std::recursive_mutex> lock(_mx);
auto refPlayer = std::make_shared<RefWrapper<StackPlayer::Ptr>>(std::make_shared<StackPlayer>(id));
auto refPlayer =
std::make_shared<RefWrapper<StackPlayer::Ptr>>(std::make_shared<StackPlayer>(id));
_playerMap[id] = refPlayer;
auto player = refPlayer->acquire();
if (!id.empty()) {
player->play();
}
if (!id.empty()) { player->play(); }
return player;
}


@ -5,29 +5,23 @@
#include "Player/MediaPlayer.h"
#include "json/json.h"
#include <mutex>
template <typename T>
class RefWrapper {
public:
template<typename T> class RefWrapper {
public:
using Ptr = std::shared_ptr<RefWrapper<T>>;
template <typename... Args>
explicit RefWrapper(Args&&... args)
: _rc(0)
, _entity(std::forward<Args>(args)...)
{
}
template<typename... Args>
explicit RefWrapper(Args&&... args) : _rc(0), _entity(std::forward<Args>(args)...) {}
T acquire()
{
T acquire() {
++_rc;
return _entity;
}
bool dispose() { return --_rc <= 0; }
private:
T _entity;
private:
std::atomic<int> _rc;
T _entity;
};
class Channel;
@ -40,7 +34,7 @@ struct Param {
int width = 0;
int height = 0;
AVPixelFormat pixfmt = AV_PIX_FMT_YUV420P;
std::string id {};
std::string id{};
// runtime
std::weak_ptr<Channel> weak_chn;
@ -52,7 +46,7 @@ struct Param {
using Params = std::shared_ptr<std::vector<Param::Ptr>>;
class Channel : public std::enable_shared_from_this<Channel> {
public:
public:
using Ptr = std::shared_ptr<Channel>;
Channel(const std::string& id, int width, int height, AVPixelFormat pixfmt);
@ -63,12 +57,12 @@ class Channel : public std::enable_shared_from_this<Channel> {
void fillBuffer(const Param::Ptr& p);
protected:
protected:
void forEachParam(const std::function<void(const Param::Ptr&)>& func);
void copyData(const mediakit::FFmpegFrame::Ptr& buf, const Param::Ptr& p);
private:
private:
std::string _id;
int _width;
int _height;
@ -84,13 +78,10 @@ class Channel : public std::enable_shared_from_this<Channel> {
};
class StackPlayer : public std::enable_shared_from_this<StackPlayer> {
public:
public:
using Ptr = std::shared_ptr<StackPlayer>;
StackPlayer(const std::string& url)
: _url(url)
{
}
StackPlayer(const std::string& url) : _url(url) {}
void addChannel(const std::weak_ptr<Channel>& chn);
@ -100,14 +91,14 @@ class StackPlayer : public std::enable_shared_from_this<StackPlayer> {
void onDisconnect();
protected:
protected:
void rePlay(const std::string& url);
private:
private:
std::string _url;
mediakit::MediaPlayer::Ptr _player;
//Used to reconnect after a disconnect
// Used to reconnect after a disconnect
toolkit::Timer::Ptr _timer;
int _failedCount = 0;
@ -116,14 +107,11 @@ class StackPlayer : public std::enable_shared_from_this<StackPlayer> {
};
class VideoStack {
public:
public:
using Ptr = std::shared_ptr<VideoStack>;
VideoStack(const std::string& url,
int width = 1920,
int height = 1080,
AVPixelFormat pixfmt = AV_PIX_FMT_YUV420P,
float fps = 25.0,
VideoStack(const std::string& url, int width = 1920, int height = 1080,
AVPixelFormat pixfmt = AV_PIX_FMT_YUV420P, float fps = 25.0,
int bitRate = 2 * 1024 * 1024);
~VideoStack();
@ -132,15 +120,15 @@ class VideoStack {
void start();
protected:
protected:
void initBgColor();
public:
public:
Params _params;
mediakit::FFmpegFrame::Ptr _buffer;
private:
private:
std::string _id;
int _width;
int _height;
@ -156,47 +144,41 @@ class VideoStack {
};
class VideoStackManager {
public:
static VideoStackManager& Instance();
Channel::Ptr getChannel(const std::string& id,
int width,
int height,
AVPixelFormat pixfmt);
void unrefChannel(const std::string& id,
int width,
int height,
AVPixelFormat pixfmt);
public:
// Create a stacked (composite) stream
int startVideoStack(const Json::Value& json);
// Stop a stacked stream
int stopVideoStack(const std::string& id);
// Modify a stack's configuration without stopping the stream (switches what the composite screen shows)
int resetVideoStack(const Json::Value& json);
int stopVideoStack(const std::string& id);
public:
static VideoStackManager& Instance();
Channel::Ptr getChannel(const std::string& id, int width, int height, AVPixelFormat pixfmt);
void unrefChannel(const std::string& id, int width, int height, AVPixelFormat pixfmt);
bool loadBgImg(const std::string& path);
void clear();
mediakit::FFmpegFrame::Ptr getBgImg();
protected:
Params parseParams(const Json::Value& json,
std::string& id,
int& width,
int& height);
protected:
Params parseParams(const Json::Value& json, std::string& id, int& width, int& height);
protected:
Channel::Ptr createChannel(const std::string& id,
int width,
int height,
AVPixelFormat pixfmt);
protected:
Channel::Ptr createChannel(const std::string& id, int width, int height, AVPixelFormat pixfmt);
StackPlayer::Ptr createPlayer(const std::string& id);
private:
private:
mediakit::FFmpegFrame::Ptr _bgImg;
private:
private:
std::recursive_mutex _mx;
std::unordered_map<std::string, VideoStack::Ptr> _stackMap;


@ -8,6 +8,7 @@
* may be found in the AUTHORS file in the root of the source tree.
*/
#include <exception>
#include <sys/stat.h>
#include <math.h>
#include <signal.h>
@ -1950,9 +1951,29 @@ void installWebApi() {
api_regist("/index/api/stack/start", [](API_ARGS_JSON_ASYNC) {
CHECK_SECRET();
auto ret = VideoStackManager::Instance().startVideoStack(allArgs.args);
int ret = 0;
try {
ret = VideoStackManager::Instance().startVideoStack(allArgs.args);
val["code"] = ret;
val["msg"] = ret ? "failed" : "success";
} catch (const std::exception &e) {
val["code"] = -1;
val["msg"] = e.what();
}
invoker(200, headerOut, val.toStyledString());
});
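For reference, a request body that satisfies the parseParams checks added later in this commit might look like the sketch below; the field names (id, width, height, row, col, gapv, gaph, optional span) come from parseParams, while the concrete values, and any per-cell stream entries handled in the elided part of the parser, are assumptions.

// Hypothetical caller-side payload for /index/api/stack/start.
Json::Value req;
req["id"] = "stack_demo"; // assumed stack id
req["width"] = 1920;
req["height"] = 1080;
req["row"] = 2;           // grid rows
req["col"] = 2;           // grid columns
req["gapv"] = 0.01;       // vertical gap as a fraction of the height
req["gaph"] = 0.01;       // horizontal gap as a fraction of the width
int code = VideoStackManager::Instance().startVideoStack(req); // 0 on success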
api_regist("/index/api/stack/reset", [](API_ARGS_JSON_ASYNC) {
CHECK_SECRET();
int ret = 0;
try {
auto ret = VideoStackManager::Instance().resetVideoStack(allArgs.args);
val["code"] = ret;
val["msg"] = ret ? "failed" : "success";
} catch (const std::exception &e) {
val["code"] = -1;
val["msg"] = e.what();
}
invoker(200, headerOut, val.toStyledString());
});
@ -1974,6 +1995,9 @@ void unInstallWebApi(){
#if defined(ENABLE_RTPPROXY)
s_rtp_server.clear();
#endif
#if defined(ENABLE_VIDEOSTACK) && defined(ENABLE_FFMPEG) && defined(ENABLE_X264)
VideoStackManager::Instance().clear();
#endif
NoticeCenter::Instance().delListener(&web_api_tag);
}


@ -124,8 +124,9 @@ private:
void RtpServer::start(uint16_t local_port, const string &stream_id, TcpMode tcp_mode, const char *local_ip, bool re_use_port, uint32_t ssrc, int only_track, bool multiplex) {
//Create the UDP server
Socket::Ptr rtp_socket = Socket::createSocket(nullptr, true);
Socket::Ptr rtcp_socket = Socket::createSocket(nullptr, true);
auto poller = EventPollerPool::Instance().getPoller();
Socket::Ptr rtp_socket = Socket::createSocket(poller, true);
Socket::Ptr rtcp_socket = Socket::createSocket(poller, true);
if (local_port == 0) {
//Random port: the RTP port uses an even number
auto pair = std::make_pair(rtp_socket, rtcp_socket);
@ -181,14 +182,14 @@ void RtpServer::start(uint16_t local_port, const string &stream_id, TcpMode tcp_
TcpServer::Ptr tcp_server;
if (tcp_mode == PASSIVE || tcp_mode == ACTIVE) {
//Create the TCP server
tcp_server = std::make_shared<TcpServer>();
auto processor = helper ? helper->getProcess() : nullptr;
// If the same processor object is shared, set the tcp server to single-thread mode to ensure thread safety
tcp_server = std::make_shared<TcpServer>(processor ? poller : nullptr);
(*tcp_server)[RtpSession::kStreamID] = stream_id;
(*tcp_server)[RtpSession::kSSRC] = ssrc;
(*tcp_server)[RtpSession::kOnlyTrack] = only_track;
if (tcp_mode == PASSIVE) {
weak_ptr<RtpServer> weak_self = shared_from_this();
auto processor = helper ? helper->getProcess() : nullptr;
tcp_server->start<RtpSession>(local_port, local_ip, 1024, [weak_self, processor](std::shared_ptr<RtpSession> &session) {
session->setRtpProcess(processor);
});


@ -20,6 +20,28 @@
namespace mediakit {
// RTC configuration items
namespace Rtc {
//~ NACK receiver side (RTP sender)
// Maximum age of RTP packets kept in the NACK cache
extern const std::string kMaxNackMS;
// NACK check interval (in number of packets)
extern const std::string kRtpCacheCheckInterval;
//~ NACK sender side (RTP receiver)
// Maximum number of lost-RTP states to keep
extern const std::string kNackMaxSize;
// Maximum time to keep the lost-RTP state
extern const std::string kNackMaxMS;
// Maximum number of retransmission requests per NACK
extern const std::string kNackMaxCount;
// NACK retransmission interval as a multiple of RTT
extern const std::string kNackIntervalRatio;
// Number of RTP sequence numbers per NACK packet; a smaller value makes NACK more responsive
extern const std::string kNackRtpSize;
} // namespace Rtc
class NackList {
public:
void pushBack(RtpPacket::Ptr rtp);


@ -129,7 +129,12 @@ void WebRtcPlayer::sendConfigFrames(uint32_t before_seq, uint32_t sample_rate, u
if (!play_src) {
return;
}
auto video_track = std::dynamic_pointer_cast<mediakit::VideoTrack>(play_src->getTrack(mediakit::TrackVideo));
SdpParser parser(play_src->getSdp());
auto video_sdp = parser.getTrack(TrackVideo);
if (!video_sdp) {
return;
}
auto video_track = dynamic_pointer_cast<VideoTrack>(Factory::getTrackBySdp(video_sdp));
if (!video_track) {
return;
}
@ -146,9 +151,8 @@ void WebRtcPlayer::sendConfigFrames(uint32_t before_seq, uint32_t sample_rate, u
encoder->setRtpInfo(0, video_mtu, sample_rate, 0, 0, 0);
auto seq = before_seq - frames.size();
for (const auto &frame : video_track->getConfigFrames()) {
auto rtp = encoder->getRtpInfo().makeRtp(
TrackVideo, frame->data() + frame->prefixSize(), frame->size() - frame->prefixSize(), false, 0);
for (const auto &frame : frames) {
auto rtp = encoder->getRtpInfo().makeRtp(TrackVideo, frame->data() + frame->prefixSize(), frame->size() - frame->prefixSize(), false, 0);
auto header = rtp->getHeader();
header->seq = htons(seq++);
header->stamp = htonl(timestamp);


@ -13,6 +13,7 @@
#include "Util/base64.h"
#include "Network/sockutil.h"
#include "Common/config.h"
#include "Nack.h"
#include "RtpExt.h"
#include "Rtcp/Rtcp.h"
#include "Rtcp/RtcpFCI.h"
@ -57,9 +58,6 @@ const string kMinBitrate = RTC_FIELD "min_bitrate";
// Data channel settings
const string kDataChannelEcho = RTC_FIELD "datachannel_echo";
// Maximum time to keep the lost-RTP state
const string kNackMaxMS = RTC_FIELD "nackMaxMS";
static onceToken token([]() {
mINI::Instance()[kTimeOutSec] = 15;
mINI::Instance()[kExternIP] = "";
@ -72,8 +70,6 @@ static onceToken token([]() {
mINI::Instance()[kMinBitrate] = 0;
mINI::Instance()[kDataChannelEcho] = true;
mINI::Instance()[kNackMaxMS] = 3 * 1000;
});
} // namespace RTC
@ -806,7 +802,8 @@ public:
setOnSorted(std::move(cb));
//Set the jitter buffer parameters
GET_CONFIG(uint32_t, nack_maxms, Rtc::kNackMaxMS);
RtpTrackImp::setParams(1024, nack_maxms, 512);
GET_CONFIG(uint32_t, nack_max_rtp, Rtc::kNackMaxSize);
RtpTrackImp::setParams(nack_max_rtp, nack_maxms, nack_max_rtp / 2);
_nack_ctx.setOnNack([this](const FCI_NACK &nack) { onNack(nack); });
}

File diff suppressed because it is too large

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large

File diff suppressed because one or more lines are too long


@ -3,17 +3,25 @@
<head>
<title>ZLM RTC demo</title>
<script src="./ZLMRTCClient.js"></script>
<style>
video {
width: 40vw;
max-height: 50vh;
height: 22.5vw; /* default 16:9 aspect ratio relative to the width */
object-fit: contain;
background-color: grey;
}
</style>
</head>
<body>
<div style="text-align: center;">
<div>
<video id='video' controls autoplay style="text-align:left;">
<video id='video' controls autoplay>
Your browser is too old and does not support HTML5 video.
</video>
<video id='selfVideo' controls autoplay style="text-align:right;">
<video id='selfVideo' controls autoplay>
Your browser is too old and does not support HTML5 video.
</video>
</div>
@ -27,7 +35,6 @@
</div>
<div style="float: right; width: 70%">
<p>
<label for="streamUrl">url:</label>
<input type="text" style="co; width:70%" id='streamUrl' value="http://192.168.1.101/index/api/webrtc?app=live&stream=xiong&type=play">
@ -82,9 +89,24 @@
<button onclick="send()">发送(send by datachannel)</button>
<button onclick="close()">关闭(close datachannel)</button>
<p>
<label for="videoDevice">videodevice:</label>
<select id="videoDevice">
</select>
</p>
<p>
<label for="audioDevice">audiodevice:</label>
<select id="audioDevice">
</select>
</p>
<p>
<label for="switchDevice">switchDevice:</label>
<input type="checkbox" id='switchDevice' checked="checked">
</p>
<button onclick="switchVideo()">切换视频(switch video)</button>
<button onclick="switchAudio()">切换音频(switch audio)</button>
</div>
</div>
@ -132,6 +154,24 @@
document.getElementById("resolution").add(opt,null);
});
ZLMRTCClient.GetAllMediaDevice().then(devices=>{
devices.forEach(device=>{
opt = document.createElement('option');
opt.text = device.label + ":"+device.deviceId
opt.value = JSON.stringify(device)
if(device.kind == 'videoinput'){
document.getElementById("videoDevice").add(opt,null)
}else if(device.kind == 'audioinput'){
document.getElementById("audioDevice").add(opt,null)
}else if(device.kind == 'audiooutput'){
// useless
//console.error('not support device')
}
})
}).catch(e=>{
console.error(e);
})
function start_play(){
let elr = document.getElementById("resolution");
let res = elr.options[elr.selectedIndex].text.match(/\d+/g);
@ -157,6 +197,21 @@
window.history.pushState(null, null, newUrl);
}
let elv = document.getElementById("videoDevice");
let ela = document.getElementById("audioDevice");
let vdevid = ''
let adevid = ''
if (!recvOnly) {
if (elv.selectedIndex !== -1) {
vdevid = JSON.parse(elv.options[elv.selectedIndex].value).deviceId
}
if (ela.selectedIndex !== -1) {
adevid = JSON.parse(ela.options[ela.selectedIndex].value).deviceId
}
}
player = new ZLMRTCClient.Endpoint(
{
element: document.getElementById('video'),// video 标签
@ -169,6 +224,8 @@
recvOnly:recvOnly,
resolution:{w,h},
usedatachannel:document.getElementById('datachannel').checked,
videoId:vdevid, // 不填选择默认的
audioId:adevid, // 不填选择默认的
}
);
@ -344,6 +401,39 @@
// get_media_list();
}, 5000);
function switchVideo(){
if(player){
// first arg: false = switch to screen capture (second arg ignored)
// true = switch to camera video, second arg is the camera deviceId
let elv = document.getElementById("videoDevice");
let vdevid = JSON.parse(elv.options[elv.selectedIndex].value).deviceId
player.switchVideo(document.getElementById('switchDevice').checked,vdevid).then(()=>{
// switch video successful
}).catch(e=>{
// switch video failed
console.error(e);
})
}
}
function switchAudio(){
if(player){
// first arg: false = switch to screen/system audio (second arg ignored)
// true = switch to the mic, second arg is the mic deviceId
let ela = document.getElementById("audioDevice");
let adevid = JSON.parse(ela.options[ela.selectedIndex].value).deviceId
player.switcAudio(document.getElementById('switchDevice').checked,adevid).then(()=>{
// switch audio successful
}).catch(e=>{
// switch audio failed
console.error(e);
})
}
}
</script>
</body>