VS2013 / MFC + FFmpeg實現錄屏


這是一個用FFmpeg實現的錄屏功能,其中包括錄制屏幕和聲音,錄制后的視頻為MPEG4,音頻為AAC。主要程序就是創建了三個線程(ScreenCapThreadProc、AudioCapThreadProc、OutPutThreadProc),分別用來捕獲桌面圖片、捕獲聲音,以及將捕獲到的桌面圖片和聲音寫成一個視頻文件。

ScreenCapThreadProc線程函數:

DWORD WINAPI ScreenCapThreadProc(LPVOID lpParam)
{
	// Screen-capture thread: pulls raw desktop frames from pFormatCtx_Video,
	// decodes them with pCodecCtx_Video, converts to the output stream's
	// pixel format via sws_scale, and pushes the planar data (Y, U, V) into
	// the shared fifo_video ring buffer, guarded by VideoSection.
	// Runs until the global flag bCap is cleared. lpParam is unused.
	AVPacket *packet = (AVPacket *)av_malloc(sizeof(AVPacket));
	int got_picture;
	AVFrame *pFrame = av_frame_alloc();

	AVFrame *picture = av_frame_alloc();
	int size = avpicture_get_size(pFormatCtx_Out->streams[VideoIndex]->codec->pix_fmt,
		pFormatCtx_Out->streams[VideoIndex]->codec->width, pFormatCtx_Out->streams[VideoIndex]->codec->height);
	// FIX: use a thread-local conversion buffer instead of the shared global
	// picture_buf. OutPutThreadProc assigns the same global, so sharing it
	// risked a double delete[] and a use-after-free between the two threads.
	uint8_t *conv_buf = new uint8_t[size];

	avpicture_fill((AVPicture *)picture, conv_buf,
		pFormatCtx_Out->streams[VideoIndex]->codec->pix_fmt,
		pFormatCtx_Out->streams[VideoIndex]->codec->width,
		pFormatCtx_Out->streams[VideoIndex]->codec->height);

	// FIX: removed the stray fopen("proc_test.yuv", "wb+") — the handle was
	// never written to and never closed (resource leak, leftover file).

	int height = pFormatCtx_Out->streams[VideoIndex]->codec->height;
	int width = pFormatCtx_Out->streams[VideoIndex]->codec->width;
	int y_size = height*width;
	while (bCap)
	{
		if (av_read_frame(pFormatCtx_Video, packet) < 0)
		{
			continue;
		}
		if (packet->stream_index == 0)
		{
			if (avcodec_decode_video2(pCodecCtx_Video, pFrame, &got_picture, packet) < 0)
			{
				printf("Decode Error.(解碼錯誤)\n");
				av_free_packet(packet);	// FIX: release the packet before retrying (was leaked)
				continue;
			}
			if (got_picture)
			{
				// Convert the captured frame to the output pixel format/size.
				sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0,
					pFormatCtx_Out->streams[VideoIndex]->codec->height, picture->data, picture->linesize);

				// Write one whole frame only if the FIFO has room for it.
				// For YUV420P, y_size + y_size/4 + y_size/4 == size bytes.
				if (av_fifo_space(fifo_video) >= size)
				{
					EnterCriticalSection(&VideoSection);
					av_fifo_generic_write(fifo_video, picture->data[0], y_size, NULL);
					av_fifo_generic_write(fifo_video, picture->data[1], y_size / 4, NULL);
					av_fifo_generic_write(fifo_video, picture->data[2], y_size / 4, NULL);
					LeaveCriticalSection(&VideoSection);
				}
			}
		}
		av_free_packet(packet);	// FIX: every successfully read packet must be released (leaked once per frame)
	}
	av_frame_free(&pFrame);
	av_frame_free(&picture);
	av_free(packet);	// FIX: free the av_malloc'd packet shell itself
	delete[] conv_buf;
	return 0;
}

AudioCapThreadProc線程函數:

DWORD WINAPI AudioCapThreadProc(LPVOID lpParam)
{
	// Audio-capture thread: reads packets from pFormatCtx_Audio (DirectShow
	// input), decodes them into PCM frames and appends the samples to the
	// shared fifo_audio, guarded by AudioSection. The FIFO is lazily
	// allocated on the first decoded frame (sized for ~30 frames).
	// Runs until the global flag bCap is cleared. lpParam is unused.
	AVPacket pkt;
	AVFrame *frame = av_frame_alloc();
	int gotframe;
	while (bCap)
	{
		if (av_read_frame(pFormatCtx_Audio, &pkt) < 0)
		{
			continue;
		}

		if (avcodec_decode_audio4(pFormatCtx_Audio->streams[0]->codec, frame, &gotframe, &pkt) < 0)
		{
			av_free_packet(&pkt);	// FIX: packet was leaked on the decode-error path
			printf("can not decode a frame");	// FIX: typo in error message ("decoder")
			break;	// frame is freed once, after the loop (the original freed it here AND below)
		}
		av_free_packet(&pkt);

		if (!gotframe)
		{
			continue;	// decoder needs more input; try the next packet
		}

		if (NULL == fifo_audio)
		{
			// Lazy init: sample format / channel count only known after
			// the first decode; reserve room for 30 frames of samples.
			fifo_audio = av_audio_fifo_alloc(pFormatCtx_Audio->streams[0]->codec->sample_fmt,
				pFormatCtx_Audio->streams[0]->codec->channels, 30 * frame->nb_samples);
		}

		// FIX: dropped the unused local buf_space (dead second call to
		// av_audio_fifo_space). Write only when the FIFO has room;
		// otherwise the frame is silently dropped, as before.
		if (av_audio_fifo_space(fifo_audio) >= frame->nb_samples)
		{
			EnterCriticalSection(&AudioSection);
			av_audio_fifo_write(fifo_audio, (void **)frame->data, frame->nb_samples);
			LeaveCriticalSection(&AudioSection);
		}
	}
	av_frame_free(&frame);
	return 0;
}

OutPutThreadProc線程函數:

// Muxer thread: drains the video FIFO (filled by ScreenCapThreadProc) and the
// audio FIFO (filled by AudioCapThreadProc), encodes video (MPEG-4) and audio
// (AAC) packets, and interleaves them into pFormatCtx_Out. Uses av_compare_ts
// on cur_pts_v / cur_pts_a to decide which stream to service next. On exit it
// writes the trailer and tears down all three format contexts.
// lpParam is unused.
DWORD WINAPI OutPutThreadProc(LPVOID lpParam){

	AVFrame *picture = av_frame_alloc();
	int size = avpicture_get_size(pFormatCtx_Out->streams[VideoIndex]->codec->pix_fmt,
		pFormatCtx_Out->streams[VideoIndex]->codec->width, pFormatCtx_Out->streams[VideoIndex]->codec->height);
	// NOTE(review): picture_buf is a shared global also assigned by
	// ScreenCapThreadProc, and it is never delete[]'d in this function —
	// potential race and leak; confirm ownership before changing.
	picture_buf = new uint8_t[size];

	avpicture_fill((AVPicture *)picture, picture_buf,
		pFormatCtx_Out->streams[VideoIndex]->codec->pix_fmt,
		pFormatCtx_Out->streams[VideoIndex]->codec->width,
		pFormatCtx_Out->streams[VideoIndex]->codec->height);



	// Running pts of the last written video/audio packet (output time base),
	// and per-stream frame counters used to synthesize pts values.
	int64_t cur_pts_v = 0, cur_pts_a = 0;
	int VideoFrameIndex = 0, AudioFrameIndex = 0;

	while (bCap)
	{
		if (flagThread && bCap)
		{
			bCap = false;
			Sleep(2000);// crude shutdown: sleep to let the capture threads notice bCap and exit
			continue;
		}
		if (fifo_audio && fifo_video)
		{
			int sizeAudio = av_audio_fifo_size(fifo_audio);
			int sizeVideo = av_fifo_size(fifo_video);
			// Stop once capture has ended (!bCap) and both FIFOs are drained
			// below one frame's worth of data.
			if (av_audio_fifo_size(fifo_audio) <= pFormatCtx_Out->streams[AudioIndex]->codec->frame_size &&
				av_fifo_size(fifo_video) <= frame_size && !bCap)
			{
				break;
			}
		}

		// Service whichever stream is behind: video when its pts is <= audio's.
		if (av_compare_ts(cur_pts_v, pFormatCtx_Out->streams[VideoIndex]->time_base,
			cur_pts_a, pFormatCtx_Out->streams[AudioIndex]->time_base) <= 0)
		{
			// Video path: read one raw frame from the FIFO and encode it.
			// If capture stopped and less than a frame remains, park the video
			// pts at INT64_MAX so only audio gets flushed from here on.
			if (av_fifo_size(fifo_video) < frame_size && !bCap)
			{
				cur_pts_v = 0x7fffffffffffffff;
			}
			if (av_fifo_size(fifo_video) >= size)
			{
				EnterCriticalSection(&VideoSection);
				av_fifo_generic_read(fifo_video, picture_buf, size, NULL);
				LeaveCriticalSection(&VideoSection);

				// Re-point the AVFrame planes at the freshly read buffer.
				avpicture_fill((AVPicture *)picture, picture_buf,
					pFormatCtx_Out->streams[VideoIndex]->codec->pix_fmt,
					pFormatCtx_Out->streams[VideoIndex]->codec->width,
					pFormatCtx_Out->streams[VideoIndex]->codec->height);

				// pts = n * ((1 / timebase) / fps); the literal 15 is the
				// assumed capture frame rate — keep in sync with the grabber setup.
				picture->pts = VideoFrameIndex * ((pFormatCtx_Video->streams[0]->time_base.den / pFormatCtx_Video->streams[0]->time_base.num) / 15);

				int got_picture = 0;
				AVPacket pkt;
				av_init_packet(&pkt);

				pkt.data = NULL;
				pkt.size = 0;
				int ret = avcodec_encode_video2(pFormatCtx_Out->streams[VideoIndex]->codec, &pkt, picture, &got_picture);
				if (ret < 0)
				{
					// Encode error: drop this frame and carry on.
					// NOTE(review): pkt is not freed here — verify whether the
					// encoder can have attached data on failure.
					continue;
				}

				if (got_picture == 1)
				{
					pkt.stream_index = VideoIndex;
					// Rescale pts/dts from the capture stream's time base to
					// the output stream's time base.
					pkt.pts = av_rescale_q_rnd(pkt.pts, pFormatCtx_Video->streams[0]->time_base,
						pFormatCtx_Out->streams[VideoIndex]->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
					pkt.dts = av_rescale_q_rnd(pkt.dts, pFormatCtx_Video->streams[0]->time_base,
						pFormatCtx_Out->streams[VideoIndex]->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));

					// NOTE(review): duration uses streams[0]'s time base, not
					// streams[VideoIndex]'s — fine only if VideoIndex == 0; confirm.
					pkt.duration = ((pFormatCtx_Out->streams[0]->time_base.den / pFormatCtx_Out->streams[0]->time_base.num) / 15);

					cur_pts_v = pkt.pts;

					ret = av_interleaved_write_frame(pFormatCtx_Out, &pkt);
					av_free_packet(&pkt);
				}
				VideoFrameIndex++;
			}
		}
		else
		{
			// Audio path: read frame_size samples from the FIFO and encode them.
			if (NULL == fifo_audio)
			{
				continue;// audio FIFO not allocated yet by the capture thread
			}
			// If capture stopped and less than one encoder frame remains,
			// park the audio pts at INT64_MAX so only video gets flushed.
			if (av_audio_fifo_size(fifo_audio) < pFormatCtx_Out->streams[AudioIndex]->codec->frame_size && !bCap)
			{
				cur_pts_a = 0x7fffffffffffffff;
			}
			if (av_audio_fifo_size(fifo_audio) >=
				(pFormatCtx_Out->streams[AudioIndex]->codec->frame_size > 0 ? pFormatCtx_Out->streams[AudioIndex]->codec->frame_size : 1024))
			{
				// Build a frame matching the output encoder's parameters;
				// 1024 samples is the fallback when frame_size is unset.
				AVFrame *frame;
				frame = av_frame_alloc();
				frame->nb_samples = pFormatCtx_Out->streams[AudioIndex]->codec->frame_size > 0 ? pFormatCtx_Out->streams[AudioIndex]->codec->frame_size : 1024;
				frame->channel_layout = pFormatCtx_Out->streams[AudioIndex]->codec->channel_layout;
				frame->format = pFormatCtx_Out->streams[AudioIndex]->codec->sample_fmt;
				frame->sample_rate = pFormatCtx_Out->streams[AudioIndex]->codec->sample_rate;
				av_frame_get_buffer(frame, 0);

				EnterCriticalSection(&AudioSection);
				av_audio_fifo_read(fifo_audio, (void **)frame->data,
					(pFormatCtx_Out->streams[AudioIndex]->codec->frame_size > 0 ? pFormatCtx_Out->streams[AudioIndex]->codec->frame_size : 1024));
				LeaveCriticalSection(&AudioSection);

				// NOTE(review): the indices look swapped here — output side uses
				// streams[0] while the capture side uses streams[AudioIndex];
				// elsewhere AudioIndex indexes the OUTPUT context. Confirm intent.
				if (pFormatCtx_Out->streams[0]->codec->sample_fmt != pFormatCtx_Audio->streams[AudioIndex]->codec->sample_fmt
					|| pFormatCtx_Out->streams[0]->codec->channels != pFormatCtx_Audio->streams[AudioIndex]->codec->channels
					|| pFormatCtx_Out->streams[0]->codec->sample_rate != pFormatCtx_Audio->streams[AudioIndex]->codec->sample_rate)
				{
					// If input and output audio formats differ, resampling would
					// be needed here; they are identical in this setup, so it is skipped.
				}

				AVPacket pkt_out;
				av_init_packet(&pkt_out);
				int got_picture = -1;
				pkt_out.data = NULL;
				pkt_out.size = 0;

				// Synthesize pts in samples: frame index * samples per frame.
				frame->pts = AudioFrameIndex * pFormatCtx_Out->streams[AudioIndex]->codec->frame_size;
				if (avcodec_encode_audio2(pFormatCtx_Out->streams[AudioIndex]->codec, &pkt_out, frame, &got_picture) < 0)
				{
					printf("can not decoder a frame");
				}
				av_frame_free(&frame);
				if (got_picture)
				{
					pkt_out.stream_index = AudioIndex;
					pkt_out.pts = AudioFrameIndex * pFormatCtx_Out->streams[AudioIndex]->codec->frame_size;
					pkt_out.dts = AudioFrameIndex * pFormatCtx_Out->streams[AudioIndex]->codec->frame_size;
					pkt_out.duration = pFormatCtx_Out->streams[AudioIndex]->codec->frame_size;

					cur_pts_a = pkt_out.pts;

					int ret = av_interleaved_write_frame(pFormatCtx_Out, &pkt_out);
					av_free_packet(&pkt_out);
				}
				AudioFrameIndex++;
			}
		}
	}

	// Finalize the container and release all format contexts.
	av_write_trailer(pFormatCtx_Out);

	avio_close(pFormatCtx_Out->pb);
	avformat_free_context(pFormatCtx_Out);

	if (pFormatCtx_Video != NULL)
	{
		avformat_close_input(&pFormatCtx_Video);
	
		pFormatCtx_Video = NULL;
	}
	if (pFormatCtx_Audio != NULL)
	{
		avformat_close_input(&pFormatCtx_Audio);
		pFormatCtx_Audio = NULL;
	}

	return 0;
}
最后結果:

運行工程需要注意:在AudioCapThreadProc線程函數中以Direct Show的方式打開設備捕獲聲音時,需要使用ffmpeg命令顯示本機的設備名稱

ffmpeg命令:

ffmpeg -list_devices true -f dshow -i dummy

音頻設備名稱出現亂碼時,有以下兩種解決辦法:

解決辦法1:把亂碼ANSI轉UTF-8

解決辦法2:還有一種更簡單的方式查看設備的名稱。即不使用FFmpeg查看系統DirectShow輸入設備的名稱,而使用DirectShow SDK自帶的工具GraphEdit(或者網上下一個GraphStudioNext)查看輸入名稱。

打開graphstudionext.exe,“圖像->插入濾鏡”

選擇Audio Capture Sources來查看音頻輸入設備的簡體中文名稱


csdn源碼下載:http://download.csdn.net/detail/davebobo/9492724






注意!

本站转载的文章为个人学习借鉴使用,本站对版权不负任何法律责任。如果侵犯了您的隐私权益,请联系我们删除。



 
粤ICP备14056181号  © 2014-2020 ITdaan.com