Skip to content
Projects
Groups
Snippets
Help
Loading...
Help
Submit feedback
Contribute to GitLab
Sign in
Toggle navigation
O
OpenBoard
Project
Project
Details
Activity
Releases
Cycle Analytics
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Charts
Issues
0
Issues
0
List
Board
Labels
Milestones
Merge Requests
0
Merge Requests
0
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Charts
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Charts
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
lifo
Nicolas Ollinger
OpenBoard
Commits
11c207d7
Commit
11c207d7
authored
May 04, 2016
by
Craig Watson
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
Podcasts on Linux: added audio support
parent
a2fb735b
Changes
6
Show whitespace changes
Inline
Side-by-side
Showing
6 changed files
with
608 additions
and
63 deletions
+608
-63
UBPodcastController.cpp
src/podcast/UBPodcastController.cpp
+3
-0
UBFFmpegVideoEncoder.cpp
src/podcast/ffmpeg/UBFFmpegVideoEncoder.cpp
+300
-46
UBFFmpegVideoEncoder.h
src/podcast/ffmpeg/UBFFmpegVideoEncoder.h
+33
-15
UBMicrophoneInput.cpp
src/podcast/ffmpeg/UBMicrophoneInput.cpp
+211
-0
UBMicrophoneInput.h
src/podcast/ffmpeg/UBMicrophoneInput.h
+57
-0
podcast.pri
src/podcast/podcast.pri
+4
-2
No files found.
src/podcast/UBPodcastController.cpp
View file @
11c207d7
...
@@ -66,6 +66,7 @@
...
@@ -66,6 +66,7 @@
#include "quicktime/UBAudioQueueRecorder.h"
#include "quicktime/UBAudioQueueRecorder.h"
#elif defined(Q_OS_LINUX)
#elif defined(Q_OS_LINUX)
#include "ffmpeg/UBFFmpegVideoEncoder.h"
#include "ffmpeg/UBFFmpegVideoEncoder.h"
#include "ffmpeg/UBMicrophoneInput.h"
#endif
#endif
#include "core/memcheck.h"
#include "core/memcheck.h"
...
@@ -808,6 +809,8 @@ QStringList UBPodcastController::audioRecordingDevices()
...
@@ -808,6 +809,8 @@ QStringList UBPodcastController::audioRecordingDevices()
devices
=
UBWaveRecorder
::
waveInDevices
();
devices
=
UBWaveRecorder
::
waveInDevices
();
#elif defined(Q_OS_OSX)
#elif defined(Q_OS_OSX)
devices
=
UBAudioQueueRecorder
::
waveInDevices
();
devices
=
UBAudioQueueRecorder
::
waveInDevices
();
#elif defined(Q_OS_LINUX)
devices
=
UBMicrophoneInput
::
availableDevicesNames
();
#endif
#endif
return
devices
;
return
devices
;
...
...
src/podcast/ffmpeg/UBFFmpegVideoEncoder.cpp
View file @
11c207d7
...
@@ -20,18 +20,32 @@ QString avErrorToQString(int errnum)
...
@@ -20,18 +20,32 @@ QString avErrorToQString(int errnum)
*
*
* This class provides an interface between the screencast controller and the ffmpeg
* This class provides an interface between the screencast controller and the ffmpeg
* back-end. It initializes the audio and video encoders and frees them when done;
* back-end. It initializes the audio and video encoders and frees them when done;
*
worker threads handle the actual encod
ing of frames.
*
a worker thread handles the actual encoding and writ
ing of frames.
*
*
*/
*/
UBFFmpegVideoEncoder
::
UBFFmpegVideoEncoder
(
QObject
*
parent
)
UBFFmpegVideoEncoder
::
UBFFmpegVideoEncoder
(
QObject
*
parent
)
:
UBAbstractVideoEncoder
(
parent
)
:
UBAbstractVideoEncoder
(
parent
)
,
mOutputFormatContext
(
NULL
)
,
mOutputFormatContext
(
NULL
)
,
mSwsContext
(
NULL
)
,
mSwsContext
(
NULL
)
,
mFile
(
NULL
)
,
mShouldRecordAudio
(
true
)
,
mAudioInput
(
NULL
)
,
mSwrContext
(
NULL
)
,
mAudioOutBuffer
(
NULL
)
,
mAudioSampleRate
(
44100
)
,
mAudioFrameCount
(
0
)
{
{
if
(
mShouldRecordAudio
)
{
mAudioInput
=
new
UBMicrophoneInput
();
mTimebase
=
100
*
framesPerSecond
();
connect
(
mAudioInput
,
SIGNAL
(
audioLevelChanged
(
quint8
)),
qDebug
()
<<
"timebase: "
<<
mTimebase
;
this
,
SIGNAL
(
audioLevelChanged
(
quint8
)));
connect
(
mAudioInput
,
SIGNAL
(
dataAvailable
(
QByteArray
)),
this
,
SLOT
(
onAudioAvailable
(
QByteArray
)));
}
mVideoTimebase
=
100
*
framesPerSecond
();
qDebug
()
<<
"timebase: "
<<
mVideoTimebase
;
mVideoEncoderThread
=
new
QThread
;
mVideoEncoderThread
=
new
QThread
;
mVideoWorker
=
new
UBFFmpegVideoEncoderWorker
(
this
);
mVideoWorker
=
new
UBFFmpegVideoEncoderWorker
(
this
);
...
@@ -58,6 +72,8 @@ UBFFmpegVideoEncoder::~UBFFmpegVideoEncoder()
...
@@ -58,6 +72,8 @@ UBFFmpegVideoEncoder::~UBFFmpegVideoEncoder()
if
(
mVideoEncoderThread
)
if
(
mVideoEncoderThread
)
delete
mVideoEncoderThread
;
delete
mVideoEncoderThread
;
if
(
mAudioInput
)
delete
mAudioInput
;
}
}
void
UBFFmpegVideoEncoder
::
setLastErrorMessage
(
const
QString
&
pMessage
)
void
UBFFmpegVideoEncoder
::
setLastErrorMessage
(
const
QString
&
pMessage
)
...
@@ -66,12 +82,16 @@ void UBFFmpegVideoEncoder::setLastErrorMessage(const QString& pMessage)
...
@@ -66,12 +82,16 @@ void UBFFmpegVideoEncoder::setLastErrorMessage(const QString& pMessage)
mLastErrorMessage
=
pMessage
;
mLastErrorMessage
=
pMessage
;
}
}
bool
UBFFmpegVideoEncoder
::
start
()
bool
UBFFmpegVideoEncoder
::
start
()
{
{
bool
initialized
=
init
();
bool
initialized
=
init
();
if
(
initialized
)
if
(
initialized
)
{
mVideoEncoderThread
->
start
();
mVideoEncoderThread
->
start
();
if
(
mShouldRecordAudio
)
mAudioInput
->
start
();
}
return
initialized
;
return
initialized
;
}
}
...
@@ -82,12 +102,14 @@ bool UBFFmpegVideoEncoder::stop()
...
@@ -82,12 +102,14 @@ bool UBFFmpegVideoEncoder::stop()
mVideoWorker
->
stopEncoding
();
mVideoWorker
->
stopEncoding
();
if
(
mShouldRecordAudio
)
mAudioInput
->
stop
();
return
true
;
return
true
;
}
}
bool
UBFFmpegVideoEncoder
::
init
()
bool
UBFFmpegVideoEncoder
::
init
()
{
{
// Initialize ffmpeg lib
av_register_all
();
av_register_all
();
avcodec_register_all
();
avcodec_register_all
();
...
@@ -96,7 +118,6 @@ bool UBFFmpegVideoEncoder::init()
...
@@ -96,7 +118,6 @@ bool UBFFmpegVideoEncoder::init()
// Output format and context
// Output format and context
// --------------------------------------
// --------------------------------------
if
(
avformat_alloc_output_context2
(
&
mOutputFormatContext
,
NULL
,
if
(
avformat_alloc_output_context2
(
&
mOutputFormatContext
,
NULL
,
"mp4"
,
NULL
)
<
0
)
"mp4"
,
NULL
)
<
0
)
{
{
...
@@ -109,6 +130,7 @@ bool UBFFmpegVideoEncoder::init()
...
@@ -109,6 +130,7 @@ bool UBFFmpegVideoEncoder::init()
// Video codec and context
// Video codec and context
// -------------------------------------
// -------------------------------------
mVideoStream
=
avformat_new_stream
(
mOutputFormatContext
,
0
);
AVCodec
*
videoCodec
=
avcodec_find_encoder
(
mOutputFormatContext
->
oformat
->
video_codec
);
AVCodec
*
videoCodec
=
avcodec_find_encoder
(
mOutputFormatContext
->
oformat
->
video_codec
);
if
(
!
videoCodec
)
{
if
(
!
videoCodec
)
{
...
@@ -116,16 +138,12 @@ bool UBFFmpegVideoEncoder::init()
...
@@ -116,16 +138,12 @@ bool UBFFmpegVideoEncoder::init()
return
false
;
return
false
;
}
}
mVideoStream
=
avformat_new_stream
(
mOutputFormatContext
,
0
);
mVideoStream
->
time_base
=
{
1
,
mTimebase
};
avcodec_get_context_defaults3
(
mVideoStream
->
codec
,
videoCodec
);
AVCodecContext
*
c
=
avcodec_alloc_context3
(
videoCodec
);
AVCodecContext
*
c
=
avcodec_alloc_context3
(
videoCodec
);
c
->
bit_rate
=
videoBitsPerSecond
();
c
->
bit_rate
=
videoBitsPerSecond
();
c
->
width
=
videoSize
().
width
();
c
->
width
=
videoSize
().
width
();
c
->
height
=
videoSize
().
height
();
c
->
height
=
videoSize
().
height
();
c
->
time_base
=
{
1
,
mTimebase
};
c
->
time_base
=
{
1
,
m
Video
Timebase
};
c
->
gop_size
=
10
;
c
->
gop_size
=
10
;
c
->
max_b_frames
=
0
;
c
->
max_b_frames
=
0
;
c
->
pix_fmt
=
AV_PIX_FMT_YUV420P
;
c
->
pix_fmt
=
AV_PIX_FMT_YUV420P
;
...
@@ -161,10 +179,77 @@ bool UBFFmpegVideoEncoder::init()
...
@@ -161,10 +179,77 @@ bool UBFFmpegVideoEncoder::init()
// Audio codec and context
// Audio codec and context
// -------------------------------------
// -------------------------------------
/*
if
(
mShouldRecordAudio
)
{
// Microphone input
if
(
!
mAudioInput
->
init
())
{
setLastErrorMessage
(
"Couldn't initialize audio input"
);
return
false
;
}
int
inChannelCount
=
mAudioInput
->
channelCount
();
int
inSampleRate
=
mAudioInput
->
sampleRate
();
int
inSampleSize
=
mAudioInput
->
sampleSize
();
qDebug
()
<<
"inChannelCount = "
<<
inChannelCount
;
qDebug
()
<<
"inSampleRate = "
<<
inSampleRate
;
qDebug
()
<<
"inSampleSize = "
<<
inSampleSize
;
// Codec
AVCodec
*
audioCodec
=
avcodec_find_encoder
(
mOutputFormatContext
->
oformat
->
audio_codec
);
AVCodec
*
audioCodec
=
avcodec_find_encoder
(
mOutputFormatContext
->
oformat
->
audio_codec
);
if
(
!
audioCodec
)
{
setLastErrorMessage
(
"Audio codec not found"
);
return
false
;
}
mAudioStream
=
avformat_new_stream
(
mOutputFormatContext
,
audioCodec
);
mAudioStream
=
avformat_new_stream
(
mOutputFormatContext
,
audioCodec
);
*/
mAudioStream
->
id
=
mOutputFormatContext
->
nb_streams
-
1
;
c
=
mAudioStream
->
codec
;
c
->
bit_rate
=
96000
;
c
->
sample_fmt
=
audioCodec
->
sample_fmts
[
0
];
// FLTP by default for AAC
c
->
sample_rate
=
mAudioSampleRate
;
c
->
channels
=
2
;
c
->
channel_layout
=
av_get_default_channel_layout
(
c
->
channels
);
c
->
profile
=
FF_PROFILE_AAC_MAIN
;
c
->
time_base
=
{
1
,
mAudioSampleRate
};
if
(
mOutputFormatContext
->
oformat
->
flags
&
AVFMT_GLOBALHEADER
)
c
->
flags
|=
AV_CODEC_FLAG_GLOBAL_HEADER
;
ret
=
avcodec_open2
(
c
,
audioCodec
,
NULL
);
if
(
ret
<
0
)
{
setLastErrorMessage
(
QString
(
"Couldn't open audio codec: "
)
+
avErrorToQString
(
ret
));
return
false
;
}
// Resampling / format converting context
mSwrContext
=
swr_alloc
();
if
(
!
mSwrContext
)
{
setLastErrorMessage
(
"Could not allocate resampler context"
);
return
false
;
}
av_opt_set_int
(
mSwrContext
,
"in_channel_count"
,
inChannelCount
,
0
);
av_opt_set_int
(
mSwrContext
,
"in_sample_rate"
,
inSampleRate
,
0
);
av_opt_set_sample_fmt
(
mSwrContext
,
"in_sample_fmt"
,
(
AVSampleFormat
)
mAudioInput
->
sampleFormat
(),
0
);
av_opt_set_int
(
mSwrContext
,
"out_channel_count"
,
c
->
channels
,
0
);
av_opt_set_int
(
mSwrContext
,
"out_sample_rate"
,
c
->
sample_rate
,
0
);
av_opt_set_sample_fmt
(
mSwrContext
,
"out_sample_fmt"
,
c
->
sample_fmt
,
0
);
ret
=
swr_init
(
mSwrContext
);
if
(
ret
<
0
)
{
setLastErrorMessage
(
QString
(
"Couldn't initialize the resampling context: "
)
+
avErrorToQString
(
ret
));
return
false
;
}
// Buffer for resampled/converted audio
mAudioOutBuffer
=
av_audio_fifo_alloc
(
c
->
sample_fmt
,
c
->
channels
,
c
->
frame_size
);
}
// Open the output file
// Open the output file
...
@@ -185,8 +270,14 @@ bool UBFFmpegVideoEncoder::init()
...
@@ -185,8 +270,14 @@ bool UBFFmpegVideoEncoder::init()
return
true
;
return
true
;
}
}
/**
* This function should be called every time a new "screenshot" is ready.
* The image is converted to the right format and sent to the encoder.
*/
void
UBFFmpegVideoEncoder
::
newPixmap
(
const
QImage
&
pImage
,
long
timestamp
)
void
UBFFmpegVideoEncoder
::
newPixmap
(
const
QImage
&
pImage
,
long
timestamp
)
{
{
// really necessary?
static
bool
isFirstFrame
=
true
;
static
bool
isFirstFrame
=
true
;
if
(
isFirstFrame
)
{
if
(
isFirstFrame
)
{
timestamp
=
0
;
timestamp
=
0
;
...
@@ -201,16 +292,16 @@ void UBFFmpegVideoEncoder::newPixmap(const QImage &pImage, long timestamp)
...
@@ -201,16 +292,16 @@ void UBFFmpegVideoEncoder::newPixmap(const QImage &pImage, long timestamp)
else
{
else
{
// First send any queued frames, then the latest one
// First send any queued frames, then the latest one
while
(
!
mPendingFrames
.
isEmpty
())
{
while
(
!
mPendingFrames
.
isEmpty
())
{
AVFrame
*
avFrame
=
convertFrame
(
mPendingFrames
.
dequeue
());
AVFrame
*
avFrame
=
convert
Image
Frame
(
mPendingFrames
.
dequeue
());
if
(
avFrame
)
if
(
avFrame
)
mVideoWorker
->
queueFrame
(
avFrame
);
mVideoWorker
->
queueFrame
(
avFrame
);
}
}
// note: if converting the frame turns out to be too slow to do here, it
// note: if converting the frame turns out to be too slow to do here, it
// can always be done from the worker thread (in th
ta
case,
// can always be done from the worker thread (in th
at
case,
// the worker's queue would contain ImageFrames rather than AVFrames)
// the worker's queue would contain ImageFrames rather than AVFrames)
AVFrame
*
avFrame
=
convertFrame
({
pImage
,
timestamp
});
AVFrame
*
avFrame
=
convert
Image
Frame
({
pImage
,
timestamp
});
if
(
avFrame
)
if
(
avFrame
)
mVideoWorker
->
queueFrame
(
avFrame
);
mVideoWorker
->
queueFrame
(
avFrame
);
...
@@ -219,17 +310,18 @@ void UBFFmpegVideoEncoder::newPixmap(const QImage &pImage, long timestamp)
...
@@ -219,17 +310,18 @@ void UBFFmpegVideoEncoder::newPixmap(const QImage &pImage, long timestamp)
}
}
}
}
/** Convert a frame consisting of a QImage and timestamp to an AVFrame
/**
* Convert a frame consisting of a QImage and timestamp to an AVFrame
* with the right pixel format and PTS
* with the right pixel format and PTS
*/
*/
AVFrame
*
UBFFmpegVideoEncoder
::
convertFrame
(
ImageFrame
frame
)
AVFrame
*
UBFFmpegVideoEncoder
::
convert
Image
Frame
(
ImageFrame
frame
)
{
{
AVFrame
*
avFrame
=
av_frame_alloc
();
AVFrame
*
avFrame
=
av_frame_alloc
();
avFrame
->
format
=
mVideoStream
->
codec
->
pix_fmt
;
avFrame
->
format
=
mVideoStream
->
codec
->
pix_fmt
;
avFrame
->
width
=
mVideoStream
->
codec
->
width
;
avFrame
->
width
=
mVideoStream
->
codec
->
width
;
avFrame
->
height
=
mVideoStream
->
codec
->
height
;
avFrame
->
height
=
mVideoStream
->
codec
->
height
;
avFrame
->
pts
=
mTimebase
*
frame
.
timestamp
/
1000
;
avFrame
->
pts
=
m
Video
Timebase
*
frame
.
timestamp
/
1000
;
const
uchar
*
rgbImage
=
frame
.
image
.
bits
();
const
uchar
*
rgbImage
=
frame
.
image
.
bits
();
...
@@ -254,6 +346,93 @@ AVFrame* UBFFmpegVideoEncoder::convertFrame(ImageFrame frame)
...
@@ -254,6 +346,93 @@ AVFrame* UBFFmpegVideoEncoder::convertFrame(ImageFrame frame)
return
avFrame
;
return
avFrame
;
}
}
void
UBFFmpegVideoEncoder
::
onAudioAvailable
(
QByteArray
data
)
{
if
(
!
data
.
isEmpty
())
processAudio
(
data
);
}
/**
* Resample and convert audio to match the encoder's settings and queue the
* output. If enough output data is available, it is packaged into AVFrames and
* sent to the encoder thread.
*/
void
UBFFmpegVideoEncoder
::
processAudio
(
QByteArray
&
data
)
{
int
ret
;
AVCodecContext
*
codecContext
=
mAudioStream
->
codec
;
const
char
*
inSamples
=
data
.
constData
();
// The number of samples (per channel) in the input
int
inSamplesCount
=
data
.
size
()
/
((
mAudioInput
->
sampleSize
()
/
8
)
*
mAudioInput
->
channelCount
());
// The number of samples we will get after conversion
int
outSamplesCount
=
swr_get_out_samples
(
mSwrContext
,
inSamplesCount
);
// Allocate output samples
uint8_t
**
outSamples
=
NULL
;
int
outSamplesLineSize
;
ret
=
av_samples_alloc_array_and_samples
(
&
outSamples
,
&
outSamplesLineSize
,
codecContext
->
channels
,
outSamplesCount
,
codecContext
->
sample_fmt
,
0
);
if
(
ret
<
0
)
{
qDebug
()
<<
"Could not allocate audio samples"
<<
avErrorToQString
(
ret
);
return
;
}
// Convert to destination format
ret
=
swr_convert
(
mSwrContext
,
outSamples
,
outSamplesCount
,
(
const
uint8_t
**
)
&
inSamples
,
inSamplesCount
);
if
(
ret
<
0
)
{
qDebug
()
<<
"Error converting audio samples: "
<<
avErrorToQString
(
ret
);
return
;
}
// Append the converted samples to the out buffer.
ret
=
av_audio_fifo_write
(
mAudioOutBuffer
,
(
void
**
)
outSamples
,
outSamplesCount
);
if
(
ret
<
0
)
{
qDebug
()
<<
"Could not write to FIFO queue: "
<<
avErrorToQString
(
ret
);
return
;
}
// Keep the data queued until next call if the encoder thread isn't running
if
(
!
mVideoWorker
->
isRunning
())
return
;
bool
framesAdded
=
false
;
while
(
av_audio_fifo_size
(
mAudioOutBuffer
)
>
codecContext
->
frame_size
)
{
AVFrame
*
avFrame
=
av_frame_alloc
();
avFrame
->
nb_samples
=
codecContext
->
frame_size
;
avFrame
->
channel_layout
=
codecContext
->
channel_layout
;
avFrame
->
format
=
codecContext
->
sample_fmt
;
avFrame
->
sample_rate
=
codecContext
->
sample_rate
;
avFrame
->
pts
=
mAudioFrameCount
;
ret
=
av_frame_get_buffer
(
avFrame
,
0
);
if
(
ret
<
0
)
{
qDebug
()
<<
"Couldn't allocate frame: "
<<
avErrorToQString
(
ret
);
break
;
}
ret
=
av_audio_fifo_read
(
mAudioOutBuffer
,
(
void
**
)
avFrame
->
data
,
codecContext
->
frame_size
);
if
(
ret
<
0
)
qDebug
()
<<
"Could not read from FIFO queue: "
<<
avErrorToQString
(
ret
);
else
{
mAudioFrameCount
+=
codecContext
->
frame_size
;
mVideoWorker
->
queueAudio
(
avFrame
);
framesAdded
=
true
;
}
}
if
(
framesAdded
)
mVideoWorker
->
mWaitCondition
.
wakeAll
();
}
void
UBFFmpegVideoEncoder
::
finishEncoding
()
void
UBFFmpegVideoEncoder
::
finishEncoding
()
{
{
qDebug
()
<<
"VideoEncoder::finishEncoding called"
;
qDebug
()
<<
"VideoEncoder::finishEncoding called"
;
...
@@ -264,7 +443,7 @@ void UBFFmpegVideoEncoder::finishEncoding()
...
@@ -264,7 +443,7 @@ void UBFFmpegVideoEncoder::finishEncoding()
do
{
do
{
// TODO: get rid of duplicated code (videoWorker does almost exactly this during encoding)
// TODO: get rid of duplicated code (videoWorker does almost exactly this during encoding)
AVPacket
*
packet
=
mVideoWorker
->
mPacket
;
AVPacket
*
packet
=
mVideoWorker
->
m
Video
Packet
;
if
(
avcodec_encode_video2
(
mVideoStream
->
codec
,
packet
,
NULL
,
&
gotOutput
)
<
0
)
{
if
(
avcodec_encode_video2
(
mVideoStream
->
codec
,
packet
,
NULL
,
&
gotOutput
)
<
0
)
{
setLastErrorMessage
(
"Couldn't encode frame to video"
);
setLastErrorMessage
(
"Couldn't encode frame to video"
);
...
@@ -272,9 +451,9 @@ void UBFFmpegVideoEncoder::finishEncoding()
...
@@ -272,9 +451,9 @@ void UBFFmpegVideoEncoder::finishEncoding()
}
}
if
(
gotOutput
)
{
if
(
gotOutput
)
{
AVRational
codecTimebase
=
mVideoStream
->
codec
->
time_base
;
AVRational
codecTimebase
=
mVideoStream
->
codec
->
time_base
;
AVRational
streamTimebase
=
mVideoStream
->
time_base
;
AVRational
stream
Video
Timebase
=
mVideoStream
->
time_base
;
av_packet_rescale_ts
(
packet
,
codecTimebase
,
streamTimebase
);
av_packet_rescale_ts
(
packet
,
codecTimebase
,
stream
Video
Timebase
);
packet
->
stream_index
=
mVideoStream
->
index
;
packet
->
stream_index
=
mVideoStream
->
index
;
av_interleaved_write_frame
(
mOutputFormatContext
,
packet
);
av_interleaved_write_frame
(
mOutputFormatContext
,
packet
);
...
@@ -282,16 +461,48 @@ void UBFFmpegVideoEncoder::finishEncoding()
...
@@ -282,16 +461,48 @@ void UBFFmpegVideoEncoder::finishEncoding()
}
}
}
while
(
gotOutput
);
}
while
(
gotOutput
);
if
(
mShouldRecordAudio
)
{
int
gotOutput
,
ret
;
do
{
AVPacket
*
packet
=
mVideoWorker
->
mAudioPacket
;
ret
=
avcodec_encode_audio2
(
mAudioStream
->
codec
,
packet
,
NULL
,
&
gotOutput
);
if
(
ret
<
0
)
setLastErrorMessage
(
"Couldn't encode frame to audio"
);
else
if
(
gotOutput
)
{
AVRational
codecTimebase
=
mAudioStream
->
codec
->
time_base
;
AVRational
streamVideoTimebase
=
mAudioStream
->
time_base
;
av_packet_rescale_ts
(
packet
,
codecTimebase
,
streamVideoTimebase
);
packet
->
stream_index
=
mAudioStream
->
index
;
av_interleaved_write_frame
(
mOutputFormatContext
,
packet
);
av_packet_unref
(
packet
);
}
}
while
(
gotOutput
);
}
av_write_trailer
(
mOutputFormatContext
);
av_write_trailer
(
mOutputFormatContext
);
avio_close
(
mOutputFormatContext
->
pb
);
avio_close
(
mOutputFormatContext
->
pb
);
avcodec_close
(
mVideoStream
->
codec
);
avcodec_close
(
mVideoStream
->
codec
);
sws_freeContext
(
mSwsContext
);
sws_freeContext
(
mSwsContext
);
if
(
mShouldRecordAudio
)
{
avcodec_close
(
mAudioStream
->
codec
);
swr_free
(
&
mSwrContext
);
}
avformat_free_context
(
mOutputFormatContext
);
avformat_free_context
(
mOutputFormatContext
);
emit
encodingFinished
(
true
);
emit
encodingFinished
(
true
);
}
}
//-------------------------------------------------------------------------
//-------------------------------------------------------------------------
// Worker
// Worker
//-------------------------------------------------------------------------
//-------------------------------------------------------------------------
...
@@ -301,7 +512,8 @@ UBFFmpegVideoEncoderWorker::UBFFmpegVideoEncoderWorker(UBFFmpegVideoEncoder* con
...
@@ -301,7 +512,8 @@ UBFFmpegVideoEncoderWorker::UBFFmpegVideoEncoderWorker(UBFFmpegVideoEncoder* con
{
{
mStopRequested
=
false
;
mStopRequested
=
false
;
mIsRunning
=
false
;
mIsRunning
=
false
;
mPacket
=
new
AVPacket
();
mVideoPacket
=
new
AVPacket
();
mAudioPacket
=
new
AVPacket
();
}
}
UBFFmpegVideoEncoderWorker
::~
UBFFmpegVideoEncoderWorker
()
UBFFmpegVideoEncoderWorker
::~
UBFFmpegVideoEncoderWorker
()
...
@@ -316,11 +528,23 @@ void UBFFmpegVideoEncoderWorker::stopEncoding()
...
@@ -316,11 +528,23 @@ void UBFFmpegVideoEncoderWorker::stopEncoding()
void
UBFFmpegVideoEncoderWorker
::
queueFrame
(
AVFrame
*
frame
)
void
UBFFmpegVideoEncoderWorker
::
queueFrame
(
AVFrame
*
frame
)
{
{
if
(
frame
)
{
mFrameQueueMutex
.
lock
();
mFrameQueueMutex
.
lock
();
mFram
eQueue
.
enqueue
(
frame
);
mImag
eQueue
.
enqueue
(
frame
);
mFrameQueueMutex
.
unlock
();
mFrameQueueMutex
.
unlock
();
}
}
}
void
UBFFmpegVideoEncoderWorker
::
queueAudio
(
AVFrame
*
frame
)
{
if
(
frame
)
{
mFrameQueueMutex
.
lock
();
mAudioQueue
.
enqueue
(
frame
);
mFrameQueueMutex
.
unlock
();
}
}
/**
/**
* The main encoding function. Takes the queued image frames and
* The main encoding function. Takes the queued image frames and
* assembles them into the video
* assembles them into the video
...
@@ -333,15 +557,13 @@ void UBFFmpegVideoEncoderWorker::runEncoding()
...
@@ -333,15 +557,13 @@ void UBFFmpegVideoEncoderWorker::runEncoding()
mFrameQueueMutex
.
lock
();
mFrameQueueMutex
.
lock
();
mWaitCondition
.
wait
(
&
mFrameQueueMutex
);
mWaitCondition
.
wait
(
&
mFrameQueueMutex
);
while
(
!
m
Fram
eQueue
.
isEmpty
())
{
while
(
!
m
Imag
eQueue
.
isEmpty
())
{
writeLatestVideoFrame
();
writeLatestVideoFrame
();
}
}
/*
while
(
!
mAudioQueue
.
isEmpty
())
{
while
(
!
mAudioQueue
.
isEmpty
())
{
writeLatestAudioFrame
();
writeLatestAudioFrame
();
}
}
*/
mFrameQueueMutex
.
unlock
();
mFrameQueueMutex
.
unlock
();
}
}
...
@@ -351,31 +573,31 @@ void UBFFmpegVideoEncoderWorker::runEncoding()
...
@@ -351,31 +573,31 @@ void UBFFmpegVideoEncoderWorker::runEncoding()
void
UBFFmpegVideoEncoderWorker
::
writeLatestVideoFrame
()
void
UBFFmpegVideoEncoderWorker
::
writeLatestVideoFrame
()
{
{
AVFrame
*
frame
=
m
Fram
eQueue
.
dequeue
();
AVFrame
*
frame
=
m
Imag
eQueue
.
dequeue
();
int
gotOutput
;
int
gotOutput
;
av_init_packet
(
mPacket
);
av_init_packet
(
m
Video
Packet
);
mPacket
->
data
=
NULL
;
m
Video
Packet
->
data
=
NULL
;
mPacket
->
size
=
0
;
m
Video
Packet
->
size
=
0
;
// qDebug() << "Encoding frame to video. Pts: " << frame->pts << "/" << mController->mTimebase;
// qDebug() << "Encoding frame to video. Pts: " << frame->pts << "/" << mController->m
Video
Timebase;
if
(
avcodec_encode_video2
(
mController
->
mVideoStream
->
codec
,
mPacket
,
frame
,
&
gotOutput
)
<
0
)
if
(
avcodec_encode_video2
(
mController
->
mVideoStream
->
codec
,
m
Video
Packet
,
frame
,
&
gotOutput
)
<
0
)
emit
error
(
"Error encoding
frame to video
"
);
emit
error
(
"Error encoding
video frame
"
);
if
(
gotOutput
)
{
if
(
gotOutput
)
{
AVRational
codecTimebase
=
mController
->
mVideoStream
->
codec
->
time_base
;
AVRational
codecTimebase
=
mController
->
mVideoStream
->
codec
->
time_base
;
AVRational
streamTimebase
=
mController
->
mVideoStream
->
time_base
;
AVRational
stream
Video
Timebase
=
mController
->
mVideoStream
->
time_base
;
// recalculate the timestamp to match the stream's timebase
// recalculate the timestamp to match the stream's timebase
av_packet_rescale_ts
(
m
Packet
,
codecTimebase
,
stream
Timebase
);
av_packet_rescale_ts
(
m
VideoPacket
,
codecTimebase
,
streamVideo
Timebase
);
mPacket
->
stream_index
=
mController
->
mVideoStream
->
index
;
m
Video
Packet
->
stream_index
=
mController
->
mVideoStream
->
index
;
// qDebug() << "Writing encoded packet to file; pts: " << m
Packet->pts << "/" << stream
Timebase.den;
// qDebug() << "Writing encoded packet to file; pts: " << m
VideoPacket->pts << "/" << streamVideo
Timebase.den;
av_interleaved_write_frame
(
mController
->
mOutputFormatContext
,
mPacket
);
av_interleaved_write_frame
(
mController
->
mOutputFormatContext
,
m
Video
Packet
);
av_packet_unref
(
mPacket
);
av_packet_unref
(
m
Video
Packet
);
}
}
// Duct-tape solution. I assume there's a better way of doing this, but:
// Duct-tape solution. I assume there's a better way of doing this, but:
...
@@ -387,10 +609,42 @@ void UBFFmpegVideoEncoderWorker::writeLatestVideoFrame()
...
@@ -387,10 +609,42 @@ void UBFFmpegVideoEncoderWorker::writeLatestVideoFrame()
if
(
firstRun
)
{
if
(
firstRun
)
{
firstRun
=
false
;
firstRun
=
false
;
frame
->
pts
+=
1
;
frame
->
pts
+=
1
;
m
Fram
eQueue
.
enqueue
(
frame
);
// only works when the queue is empty at this point. todo: clean this up!
m
Imag
eQueue
.
enqueue
(
frame
);
// only works when the queue is empty at this point. todo: clean this up!
}
}
else
else
// free the frame
// free the frame
av_frame_free
(
&
frame
);
av_frame_free
(
&
frame
);
}
}
void
UBFFmpegVideoEncoderWorker
::
writeLatestAudioFrame
()
{
AVFrame
*
frame
=
mAudioQueue
.
dequeue
();
int
gotOutput
,
ret
;
av_init_packet
(
mAudioPacket
);
mAudioPacket
->
data
=
NULL
;
mAudioPacket
->
size
=
0
;
//qDebug() << "Encoding audio frame";
ret
=
avcodec_encode_audio2
(
mController
->
mAudioStream
->
codec
,
mAudioPacket
,
frame
,
&
gotOutput
);
if
(
ret
<
0
)
emit
error
(
QString
(
"Error encoding audio frame: "
)
+
avErrorToQString
(
ret
));
else
if
(
gotOutput
)
{
//qDebug() << "Writing audio frame to stream";
AVRational
codecTimebase
=
mController
->
mAudioStream
->
codec
->
time_base
;
AVRational
streamVideoTimebase
=
mController
->
mAudioStream
->
time_base
;
av_packet_rescale_ts
(
mAudioPacket
,
codecTimebase
,
streamVideoTimebase
);
mAudioPacket
->
stream_index
=
mController
->
mAudioStream
->
index
;
av_interleaved_write_frame
(
mController
->
mOutputFormatContext
,
mAudioPacket
);
av_packet_unref
(
mAudioPacket
);
}
av_frame_free
(
&
frame
);
}
src/podcast/ffmpeg/UBFFmpegVideoEncoder.h
View file @
11c207d7
...
@@ -5,20 +5,23 @@ extern "C" {
...
@@ -5,20 +5,23 @@ extern "C" {
#include <libavcodec/avcodec.h>
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavformat/avformat.h>
#include <libavformat/avio.h>
#include <libavformat/avio.h>
#include <libavutil/audio_fifo.h>
#include <libavutil/avutil.h>
#include <libavutil/avutil.h>
#include <libavutil/imgutils.h>
#include <libavutil/imgutils.h>
#include <libavutil/opt.h>
#include <libavutil/opt.h>
#include <libavutil/mathematics.h>
#include <libavutil/mathematics.h>
#include <libavutil/time.h>
#include <libswscale/swscale.h>
#include <libswscale/swscale.h>
#include <libswresample/swresample.h>
}
}
#include <atomic>
#include <atomic>
#include <stdio.h>
#include <QtCore>
#include <QtCore>
#include <QImage>
#include <QImage>
#include "podcast/UBAbstractVideoEncoder.h"
#include "podcast/UBAbstractVideoEncoder.h"
#include "podcast/ffmpeg/UBMicrophoneInput.h"
class
UBFFmpegVideoEncoderWorker
;
class
UBFFmpegVideoEncoderWorker
;
class
UBPodcastController
;
class
UBPodcastController
;
...
@@ -45,7 +48,6 @@ public:
...
@@ -45,7 +48,6 @@ public:
void
setRecordAudio
(
bool
pRecordAudio
)
{
mShouldRecordAudio
=
pRecordAudio
;
}
void
setRecordAudio
(
bool
pRecordAudio
)
{
mShouldRecordAudio
=
pRecordAudio
;
}
signals
:
signals
:
void
encodingFinished
(
bool
ok
);
void
encodingFinished
(
bool
ok
);
...
@@ -53,6 +55,7 @@ signals:
...
@@ -53,6 +55,7 @@ signals:
private
slots
:
private
slots
:
void
setLastErrorMessage
(
const
QString
&
pMessage
);
void
setLastErrorMessage
(
const
QString
&
pMessage
);
void
onAudioAvailable
(
QByteArray
data
);
void
finishEncoding
();
void
finishEncoding
();
private
:
private
:
...
@@ -63,32 +66,42 @@ private:
...
@@ -63,32 +66,42 @@ private:
long
timestamp
;
// unit: ms
long
timestamp
;
// unit: ms
};
};
AVFrame
*
convertFrame
(
ImageFrame
frame
);
AVFrame
*
convertImageFrame
(
ImageFrame
frame
);
AVFrame
*
convertAudio
(
QByteArray
data
);
void
processAudio
(
QByteArray
&
data
);
bool
init
();
bool
init
();
// Queue for any pixmap that might be sent before the encoder is ready
QQueue
<
ImageFrame
>
mPendingFrames
;
QString
mLastErrorMessage
;
QString
mLastErrorMessage
;
bool
mShouldRecordAudio
;
QThread
*
mVideoEncoderThread
;
QThread
*
mVideoEncoderThread
;
UBFFmpegVideoEncoderWorker
*
mVideoWorker
;
UBFFmpegVideoEncoderWorker
*
mVideoWorker
;
// Muxer
// Muxer
// ------------------------------------------
AVFormatContext
*
mOutputFormatContext
;
AVFormatContext
*
mOutputFormatContext
;
int
mTimebase
;
AVStream
*
mVideoStream
;
AVStream
*
mAudioStream
;
// Video
// Video
AVStream
*
mVideoStream
;
// ------------------------------------------
QQueue
<
ImageFrame
>
mPendingFrames
;
struct
SwsContext
*
mSwsContext
;
struct
SwsContext
*
mSwsContext
;
// Audio
int
mVideoTimebase
;
AVStream
*
mAudioStream
;
// Audio
// ------------------------------------------
bool
mShouldRecordAudio
;
FILE
*
mFile
;
UBMicrophoneInput
*
mAudioInput
;
struct
SwrContext
*
mSwrContext
;
/// Queue for audio that has been rescaled/converted but not encoded yet
AVAudioFifo
*
mAudioOutBuffer
;
/// Sample rate for encoded audio
int
mAudioSampleRate
;
/// Total audio frames sent to encoder
int
mAudioFrameCount
;
};
};
...
@@ -105,6 +118,7 @@ public:
...
@@ -105,6 +118,7 @@ public:
bool
isRunning
()
{
return
mIsRunning
;
}
bool
isRunning
()
{
return
mIsRunning
;
}
void
queueFrame
(
AVFrame
*
frame
);
void
queueFrame
(
AVFrame
*
frame
);
void
queueAudio
(
AVFrame
*
frame
);
public
slots
:
public
slots
:
void
runEncoding
();
void
runEncoding
();
...
@@ -117,19 +131,23 @@ signals:
...
@@ -117,19 +131,23 @@ signals:
private
:
private
:
void
writeLatestVideoFrame
();
void
writeLatestVideoFrame
();
void
writeLatestAudioFrame
();
UBFFmpegVideoEncoder
*
mController
;
UBFFmpegVideoEncoder
*
mController
;
// std::atomic is C++11. This won't work with msvc2010, so a
// std::atomic is C++11. This won't work with msvc2010, so a
// newer compiler must be used if this is to be used on Windows
// newer compiler must be used if this
class
is to be used on Windows
std
::
atomic
<
bool
>
mStopRequested
;
std
::
atomic
<
bool
>
mStopRequested
;
std
::
atomic
<
bool
>
mIsRunning
;
std
::
atomic
<
bool
>
mIsRunning
;
QQueue
<
AVFrame
*>
mFrameQueue
;
QQueue
<
AVFrame
*>
mImageQueue
;
QQueue
<
AVFrame
*>
mAudioQueue
;
QMutex
mFrameQueueMutex
;
QMutex
mFrameQueueMutex
;
QWaitCondition
mWaitCondition
;
QWaitCondition
mWaitCondition
;
AVPacket
*
mPacket
;
AVPacket
*
mVideoPacket
;
AVPacket
*
mAudioPacket
;
};
};
#endif // UBFFMPEGVIDEOENCODER_H
#endif // UBFFMPEGVIDEOENCODER_H
src/podcast/ffmpeg/UBMicrophoneInput.cpp
0 → 100644
View file @
11c207d7
#include "UBMicrophoneInput.h"
UBMicrophoneInput
::
UBMicrophoneInput
()
:
mAudioInput
(
NULL
)
,
mIODevice
(
NULL
)
,
mSeekPos
(
0
)
{
}
UBMicrophoneInput
::~
UBMicrophoneInput
()
{
if
(
mAudioInput
)
delete
mAudioInput
;
}
int
UBMicrophoneInput
::
channelCount
()
{
return
mAudioFormat
.
channelCount
();
}
int
UBMicrophoneInput
::
sampleRate
()
{
return
mAudioFormat
.
sampleRate
();
}
/* Return the sample size in bits */
int
UBMicrophoneInput
::
sampleSize
()
{
return
mAudioFormat
.
sampleSize
();
}
/**
 * Return the sample format of the current capture format, expressed as
 * FFmpeg's AVSampleFormat enum values.
 *
 * The enum is re-declared locally with the same values as libavutil's
 * AVSampleFormat so that this Qt-only file doesn't have to include the
 * FFmpeg headers.
 *
 * @return The matching AVSampleFormat value, or AV_SAMPLE_FMT_NONE when
 *         the combination of sample type and sample size is unsupported.
 */
int UBMicrophoneInput::sampleFormat()
{
    enum AVSampleFormat {
        AV_SAMPLE_FMT_NONE = -1,
        AV_SAMPLE_FMT_U8,
        AV_SAMPLE_FMT_S16,
        AV_SAMPLE_FMT_S32,
        AV_SAMPLE_FMT_FLT,
        AV_SAMPLE_FMT_DBL,
        AV_SAMPLE_FMT_U8P,
        AV_SAMPLE_FMT_S16P,
        AV_SAMPLE_FMT_S32P,
        AV_SAMPLE_FMT_FLTP,
        AV_SAMPLE_FMT_DBLP,
        AV_SAMPLE_FMT_NB
    };

    int sampleSize = mAudioFormat.sampleSize();
    QAudioFormat::SampleType sampleType = mAudioFormat.sampleType();

    // qDebug() << "Input sample format: " << sampleSize << "bits " << sampleType;

    switch (sampleType) {
        case QAudioFormat::SignedInt:
            if (sampleSize == 16)
                return AV_SAMPLE_FMT_S16;
            if (sampleSize == 32)
                return AV_SAMPLE_FMT_S32;
            // Bug fix: the original fell through to the next case here, so an
            // unsupported signed size was mis-reported as AV_SAMPLE_FMT_U8
            return AV_SAMPLE_FMT_NONE;

        case QAudioFormat::UnSignedInt:
            if (sampleSize == 8)
                return AV_SAMPLE_FMT_U8;
            // Bug fix: likewise, an unsupported unsigned size previously fell
            // through and was mis-reported as AV_SAMPLE_FMT_FLT
            return AV_SAMPLE_FMT_NONE;

        case QAudioFormat::Float:
            return AV_SAMPLE_FMT_FLT;

        case QAudioFormat::Unknown:
        default:
            return AV_SAMPLE_FMT_NONE;
    }
}
/// Codec name of the current capture format, as reported by QAudioFormat.
QString UBMicrophoneInput::codec()
{
    return mAudioFormat.codec();
}
/**
 * Microseconds of audio processed by the input device, or 0 when the
 * device hasn't been created yet (init() not called).
 */
qint64 UBMicrophoneInput::processUSecs() const
{
    // Bug fix: guard against a null mAudioInput; the original dereferenced it
    // unconditionally and crashed when called before init()
    if (!mAudioInput)
        return 0;

    return mAudioInput->processedUSecs();
}
/**
 * Create and configure the audio input from the currently selected device
 * (falling back to the system default when none is selected).
 *
 * @return true (no failure path is detected here; open errors surface
 *         later via start() / the stateChanged signal)
 */
bool UBMicrophoneInput::init()
{
    if (mAudioDeviceInfo.isNull()) {
        qWarning("No audio input device selected; using default");
        mAudioDeviceInfo = QAudioDeviceInfo::defaultInputDevice();
    }

    qDebug() << "Input device name: " << mAudioDeviceInfo.deviceName();

    mAudioFormat = mAudioDeviceInfo.preferredFormat();

    // Avoid leaking a previously created QAudioInput if init() is called twice
    delete mAudioInput;
    mAudioInput = new QAudioInput(mAudioDeviceInfo, mAudioFormat, NULL);
    //mAudioInput->setNotifyInterval(100);

    connect(mAudioInput, SIGNAL(stateChanged(QAudio::State)),
            this, SLOT(onAudioInputStateChanged(QAudio::State)));

    return true;
}
void
UBMicrophoneInput
::
start
()
{
qDebug
()
<<
"starting audio input"
;
mIODevice
=
mAudioInput
->
start
();
connect
(
mIODevice
,
SIGNAL
(
readyRead
()),
this
,
SLOT
(
onDataReady
()));
if
(
mAudioInput
->
error
()
==
QAudio
::
OpenError
)
qWarning
()
<<
"Error opening audio input"
;
}
/**
 * Stop capturing audio. Safe to call even if init()/start() were never
 * called.
 */
void UBMicrophoneInput::stop()
{
    // Bug fix: guard against a null mAudioInput (stop() before init() crashed)
    if (mAudioInput)
        mAudioInput->stop();
}
/**
 * List the names of all audio capture devices available on this system.
 */
QStringList UBMicrophoneInput::availableDevicesNames()
{
    QStringList names;
    const QList<QAudioDeviceInfo> devices =
            QAudioDeviceInfo::availableDevices(QAudio::AudioInput);

    for (int i = 0; i < devices.size(); ++i)
        names.push_back(devices.at(i).deviceName());

    return names;
}
/**
 * Select the capture device by name. An empty name, or a name that does
 * not match any available device, selects the system default input.
 */
void UBMicrophoneInput::setInputDevice(QString name)
{
    if (name.isEmpty()) {
        mAudioDeviceInfo = QAudioDeviceInfo::defaultInputDevice();
        return;
    }

    const QList<QAudioDeviceInfo> devices =
            QAudioDeviceInfo::availableDevices(QAudio::AudioInput);

    bool found = false;
    for (int i = 0; i < devices.size() && !found; ++i) {
        if (devices.at(i).deviceName() == name) {
            mAudioDeviceInfo = devices.at(i);
            found = true;
        }
    }

    if (!found) {
        qWarning() << "Audio input device not found; using default instead";
        mAudioDeviceInfo = QAudioDeviceInfo::defaultInputDevice();
    }
}
/**
 * Slot invoked when the capture device has bytes ready; forwards the raw
 * audio data through the dataAvailable signal.
 */
void UBMicrophoneInput::onDataReady()
{
    int numBytes = mAudioInput->bytesReady();
    if (numBytes <= 0)
        return;

    emit dataAvailable(mIODevice->read(numBytes));
}
/**
 * Slot invoked on audio input state transitions; emits error() when the
 * input stopped because of a failure.
 */
void UBMicrophoneInput::onAudioInputStateChanged(QAudio::State state)
{
    qDebug() << "Audio input state changed to " << state;

    if (state == QAudio::StoppedState) {
        // Only report stops that were caused by an actual error
        if (mAudioInput->error() != QAudio::NoError)
            emit error(getErrorString(mAudioInput->error()));
    }
    // handle other states?
}
/**
* @brief Return a meaningful error string based on QAudio error codes
*/
QString
UBMicrophoneInput
::
getErrorString
(
QAudio
::
Error
errorCode
)
{
switch
(
errorCode
)
{
case
QAudio
:
:
NoError
:
return
""
;
case
QAudio
:
:
OpenError
:
return
"Couldn't open the audio device"
;
case
QAudio
:
:
IOError
:
return
"Error reading from audio device"
;
case
QAudio
:
:
UnderrunError
:
return
"Underrun error"
;
case
QAudio
:
:
FatalError
:
return
"Fatal error; audio device unusable"
;
}
return
""
;
}
src/podcast/ffmpeg/UBMicrophoneInput.h
0 → 100644
View file @
11c207d7
#ifndef UBMICROPHONEINPUT_H
#define UBMICROPHONEINPUT_H

#include <QtCore>
#include <QAudioInput>

/**
 * @brief The UBMicrophoneInput class captures uncompressed sound from a microphone
 */
class UBMicrophoneInput : public QObject
{
    Q_OBJECT

public:
    UBMicrophoneInput();
    virtual ~UBMicrophoneInput();

    // Create the QAudioInput for the selected (or default) device
    bool init();
    // Begin capture; audio is delivered via the dataAvailable signal
    void start();
    void stop();

    // Names of all audio capture devices on this system
    static QStringList availableDevicesNames();
    // Select a capture device by name; empty/unknown name selects the default
    void setInputDevice(QString name = "");

    // Properties of the negotiated capture format
    int channelCount();
    int sampleRate();
    int sampleSize();
    // Sample format expressed as an FFmpeg AVSampleFormat value
    int sampleFormat();
    QString codec();

    // Microseconds of audio processed since capture started
    qint64 processUSecs() const;

signals:
    /// Send the new volume, between 0 and 255
    void audioLevelChanged(quint8 level);

    /// Emitted when new audio data is available
    void dataAvailable(QByteArray data);

    void error(QString message);

private slots:
    void onAudioInputStateChanged(QAudio::State state);
    void onDataReady();

private:
    QString getErrorString(QAudio::Error errorCode);

    // Owned: created in init(), deleted in the destructor
    QAudioInput* mAudioInput;
    // Returned by mAudioInput->start(); not deleted here — presumably owned
    // by mAudioInput (TODO confirm against Qt documentation)
    QIODevice* mIODevice;
    QAudioDeviceInfo mAudioDeviceInfo;
    QAudioFormat mAudioFormat;

    // NOTE(review): appears unused in the accompanying .cpp — confirm before removing
    qint64 mSeekPos;
};

#endif // UBMICROPHONEINPUT_H
src/podcast/podcast.pri
View file @
11c207d7
...
@@ -3,13 +3,15 @@ HEADERS += src/podcast/UBPodcastController.h \
...
@@ -3,13 +3,15 @@ HEADERS += src/podcast/UBPodcastController.h \
src/podcast/UBAbstractVideoEncoder.h \
src/podcast/UBAbstractVideoEncoder.h \
src/podcast/UBPodcastRecordingPalette.h \
src/podcast/UBPodcastRecordingPalette.h \
src/podcast/youtube/UBYouTubePublisher.h \
src/podcast/youtube/UBYouTubePublisher.h \
src/podcast/intranet/UBIntranetPodcastPublisher.h
src/podcast/intranet/UBIntranetPodcastPublisher.h \
$$PWD/ffmpeg/UBMicrophoneInput.h
SOURCES += src/podcast/UBPodcastController.cpp \
SOURCES += src/podcast/UBPodcastController.cpp \
src/podcast/UBAbstractVideoEncoder.cpp \
src/podcast/UBAbstractVideoEncoder.cpp \
src/podcast/UBPodcastRecordingPalette.cpp \
src/podcast/UBPodcastRecordingPalette.cpp \
src/podcast/youtube/UBYouTubePublisher.cpp \
src/podcast/youtube/UBYouTubePublisher.cpp \
src/podcast/intranet/UBIntranetPodcastPublisher.cpp
src/podcast/intranet/UBIntranetPodcastPublisher.cpp \
$$PWD/ffmpeg/UBMicrophoneInput.cpp
win32 {
win32 {
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment