diff --git a/LFLiveKit/coder/LFHardwareAudioEncoder.m b/LFLiveKit/coder/LFHardwareAudioEncoder.m
index 64f97225..dc8197cd 100755
--- a/LFLiveKit/coder/LFHardwareAudioEncoder.m
+++ b/LFLiveKit/coder/LFHardwareAudioEncoder.m
@@ -61,29 +61,36 @@ - (void)encodeAudioData:(nullable NSData*)audioData timeStamp:(uint64_t)timeStam
     }
 
     if(leftLength + audioData.length >= self.configuration.bufferLength){
-        ///<  发送
+        ///< Send: the accumulated data has reached the send size
         NSInteger totalSize = leftLength + audioData.length;
-        NSInteger encodeCount = totalSize/self.configuration.bufferLength;
+        NSInteger encodeCount = totalSize/self.configuration.bufferLength; // if audioData.length is large, totalSize can be several multiples of bufferLength
 
         char *totalBuf = malloc(totalSize);
         char *p = totalBuf;
-
-        memset(totalBuf, (int)totalSize, 0);
-        memcpy(totalBuf, leftBuf, leftLength);
-        memcpy(totalBuf + leftLength, audioData.bytes, audioData.length);
+        // Fix from https://github.com/LaiFengiOS/LFLiveKit/pull/252/files: the memset arguments were swapped
+        memset(totalBuf, 0, (int)totalSize);                              // zero-initialize totalBuf
+        memcpy(totalBuf, leftBuf, leftLength);                            // copy the leftover from the previous call
+        memcpy(totalBuf + leftLength, audioData.bytes, audioData.length); // copy this call's data
 
         for(NSInteger index = 0;index < encodeCount;index++){
+            // send bufferLength bytes starting at p
             [self encodeBuffer:p timeStamp:timeStamp];
             p += self.configuration.bufferLength;
         }
 
         leftLength = totalSize%self.configuration.bufferLength;
-        memset(leftBuf, 0, self.configuration.bufferLength);
-        memcpy(leftBuf, totalBuf + (totalSize -leftLength), leftLength);
+        memset(leftBuf, 0, self.configuration.bufferLength);             // zero leftBuf from its start through bufferLength
+        memcpy(leftBuf, totalBuf + (totalSize -leftLength), leftLength); // stash the remainder of totalBuf in leftBuf for the next send
         free(totalBuf);
 
     }else{
-        ///< 积累
+        ///< Accumulate: the send size has not been reached yet
         memcpy(leftBuf+leftLength, audioData.bytes, audioData.length);
         leftLength = leftLength + audioData.length;
     }
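
The important change in this hunk is the `memset` argument order: the signature is `memset(void *dst, int value, size_t count)`, so the original `memset(totalBuf, (int)totalSize, 0)` cleared zero bytes and left the buffer uninitialized. Here is a minimal standalone sketch of the same accumulate-and-flush pattern; `kBufferLength`, `leftBuf`, and `emitChunk` are stand-ins for illustration, not LFLiveKit API:

```objc
#import <Foundation/Foundation.h>
#include <stdlib.h>
#include <string.h>

static const size_t kBufferLength = 2048;   // hypothetical send size
static char   leftBuf[2048];                // leftover bytes carried between calls
static size_t leftLength = 0;

static void emitChunk(const char *p) {
    // stand-in for [self encodeBuffer:p timeStamp:ts]
    (void)p;
}

static void appendAudio(const char *bytes, size_t len) {
    if (leftLength + len < kBufferLength) {   // accumulate: not enough for a full chunk yet
        memcpy(leftBuf + leftLength, bytes, len);
        leftLength += len;
        return;
    }
    size_t totalSize = leftLength + len;
    char *totalBuf = malloc(totalSize);
    memset(totalBuf, 0, totalSize);           // argument order is (dst, value, count);
                                              // memset(totalBuf, totalSize, 0) would clear 0 bytes
    memcpy(totalBuf, leftBuf, leftLength);             // leftover from the previous call
    memcpy(totalBuf + leftLength, bytes, len);         // this call's data
    for (char *p = totalBuf; p + kBufferLength <= totalBuf + totalSize; p += kBufferLength)
        emitChunk(p);                                  // flush every complete chunk
    leftLength = totalSize % kBufferLength;
    memcpy(leftBuf, totalBuf + (totalSize - leftLength), leftLength); // stash the tail
    free(totalBuf);
}
```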
@@ -95,36 +102,36 @@ - (void)encodeBuffer:(char*)buf timeStamp:(uint64_t)timeStamp{
     inBuffer.mNumberChannels = 1;
     inBuffer.mData = buf;
     inBuffer.mDataByteSize = (UInt32)self.configuration.bufferLength;
-
+    // Initialize an input buffer list
     AudioBufferList buffers;
-    buffers.mNumberBuffers = 1;
+    buffers.mNumberBuffers = 1; // just the one inBuffer
     buffers.mBuffers[0] = inBuffer;
 
     // Initialize an output buffer list
     AudioBufferList outBufferList;
-    outBufferList.mNumberBuffers = 1;
+    outBufferList.mNumberBuffers = 1; // just one output buffer
     outBufferList.mBuffers[0].mNumberChannels = inBuffer.mNumberChannels;
     outBufferList.mBuffers[0].mDataByteSize = inBuffer.mDataByteSize;   // set the buffer size
-    outBufferList.mBuffers[0].mData = aacBuf;           // set the AAC buffer
+    outBufferList.mBuffers[0].mData = aacBuf;           // the AAC buffer where the encoded output is written
     UInt32 outputDataPacketSize = 1;
     if (AudioConverterFillComplexBuffer(m_converter, inputDataProc, &buffers, &outputDataPacketSize, &outBufferList, NULL) != noErr) {
         return;
     }
-
+    // Wrap the result in an LFAudioFrame for later streaming
     LFAudioFrame *audioFrame = [LFAudioFrame new];
     audioFrame.timestamp = timeStamp;
     audioFrame.data = [NSData dataWithBytes:aacBuf length:outBufferList.mBuffers[0].mDataByteSize];
 
-    char exeData[2];
+    char exeData[2]; // FLV audio header (AudioSpecificConfig); 0x12 0x10 for 44100 Hz
     exeData[0] = _configuration.asc[0];
     exeData[1] = _configuration.asc[1];
     audioFrame.audioInfo = [NSData dataWithBytes:exeData length:2];
     if (self.aacDeleage && [self.aacDeleage respondsToSelector:@selector(audioEncoder:audioFrame:)]) {
-        [self.aacDeleage 
-         audioEncoder:self audioFrame:audioFrame];
+        [self.aacDeleage audioEncoder:self audioFrame:audioFrame]; // notify the delegate that encoding finished
     }
-    if (self->enabledWriteVideoFile) {
+    if (self->enabledWriteVideoFile) { // write to a local file; used for debugging
         NSData *adts = [self adtsData:_configuration.numberOfChannels rawDataLength:audioFrame.data.length];
         fwrite(adts.bytes, 1, adts.length, self->fp);
         fwrite(audioFrame.data.bytes, 1, audioFrame.data.length, self->fp);
@@ -143,39 +150,40 @@ - (BOOL)createAudioConvert { // initialize an encoding converter from the input samples
     }
 
     AudioStreamBasicDescription inputFormat = {0};
-    inputFormat.mSampleRate = _configuration.audioSampleRate;
-    inputFormat.mFormatID = kAudioFormatLinearPCM;
+    inputFormat.mSampleRate = _configuration.audioSampleRate; // sample rate
+    inputFormat.mFormatID = kAudioFormatLinearPCM;            // data format
     inputFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked;
-    inputFormat.mChannelsPerFrame = (UInt32)_configuration.numberOfChannels;
-    inputFormat.mFramesPerPacket = 1;
-    inputFormat.mBitsPerChannel = 16;
-    inputFormat.mBytesPerFrame = inputFormat.mBitsPerChannel / 8 * inputFormat.mChannelsPerFrame;
-    inputFormat.mBytesPerPacket = inputFormat.mBytesPerFrame * inputFormat.mFramesPerPacket;
+    inputFormat.mChannelsPerFrame = (UInt32)_configuration.numberOfChannels; // channel count
+    inputFormat.mFramesPerPacket = 1;  // frames per packet: 1 for uncompressed audio; a larger fixed value for compressed formats, e.g. 1024 for AAC
+    inputFormat.mBitsPerChannel = 16;  // bits per channel, i.e. bits per sample
+    inputFormat.mBytesPerFrame = inputFormat.mBitsPerChannel / 8 * inputFormat.mChannelsPerFrame; // bytes per frame
+    inputFormat.mBytesPerPacket = inputFormat.mBytesPerFrame * inputFormat.mFramesPerPacket;      // bytes per packet; 0 if packets are variable-size
 
     AudioStreamBasicDescription outputFormat; // the output audio format starts here
-    memset(&outputFormat, 0, sizeof(outputFormat));
+    memset(&outputFormat, 0, sizeof(outputFormat));     // zero-initialize
     outputFormat.mSampleRate = inputFormat.mSampleRate; // keep the sample rate unchanged
     outputFormat.mFormatID = kAudioFormatMPEG4AAC;      // AAC encoding: kAudioFormatMPEG4AAC or kAudioFormatMPEG4AAC_HE_V2
     outputFormat.mChannelsPerFrame = (UInt32)_configuration.numberOfChannels;;
     outputFormat.mFramesPerPacket = 1024;               // one AAC packet holds 1024 frames (samples), not bytes
 
     const OSType subtype = kAudioFormatMPEG4AAC;
+    // two codec choices: software and hardware
     AudioClassDescription requestedCodecs[2] = {
         {
             kAudioEncoderComponentType,
             subtype,
-            kAppleSoftwareAudioCodecManufacturer
+            kAppleSoftwareAudioCodecManufacturer // software codec
         },
         {
             kAudioEncoderComponentType,
             subtype,
-            kAppleHardwareAudioCodecManufacturer
+            kAppleHardwareAudioCodecManufacturer // hardware codec
         }
     };
-    OSStatus result = AudioConverterNewSpecific(&inputFormat, &outputFormat, 2, requestedCodecs, &m_converter);;
+    // Create the AudioConverter: input description, output description, number of requestedCodecs, supported codecs, out converter
+    OSStatus result = AudioConverterNewSpecific(&inputFormat, &outputFormat, 2, requestedCodecs, &m_converter);
     UInt32 outputBitrate = _configuration.audioBitrate;
-    UInt32 propSize = sizeof(outputBitrate);
+    UInt32 propSize = sizeof(outputBitrate); // used below to set the bitrate
 
     if(result == noErr) {
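
The hunk is cut off right after `propSize`; presumably the next lines apply the bitrate inside `if(result == noErr)`. A sketch of how that property is typically set with AudioToolbox, not necessarily the file's verbatim continuation:

```objc
#import <Foundation/Foundation.h>
#import <AudioToolbox/AudioToolbox.h>

// Apply the AAC output bitrate to an existing converter.
static BOOL applyEncoderBitrate(AudioConverterRef converter, UInt32 bitsPerSecond) {
    UInt32 propSize = sizeof(bitsPerSecond);
    OSStatus status = AudioConverterSetProperty(converter,
                                                kAudioConverterEncodeBitRate,
                                                propSize,
                                                &bitsPerSecond);
    return status == noErr; // noErr means the converter accepted the bitrate
}
```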
diff --git a/LFLiveKit/coder/LFHardwareVideoEncoder.m b/LFLiveKit/coder/LFHardwareVideoEncoder.m
index 6c3d20fe..6cb71d05 100755
--- a/LFLiveKit/coder/LFHardwareVideoEncoder.m
+++ b/LFLiveKit/coder/LFHardwareVideoEncoder.m
@@ -43,7 +43,7 @@ - (instancetype)initWithVideoStreamConfiguration:(LFLiveVideoConfiguration *)con
     return self;
 }
 
-- (void)resetCompressionSession {
+- (void)resetCompressionSession { // tear down and recreate the VTCompressionSessionRef
     if (compressionSession) {
         VTCompressionSessionCompleteFrames(compressionSession, kCMTimeInvalid);
@@ -51,23 +51,36 @@ - (void)resetCompressionSession {
         CFRelease(compressionSession);
         compressionSession = NULL;
     }
-
+    // Create the VTCompressionSessionRef used for H.264 encoding; VideoCompressonOutputCallback is invoked when a frame finishes encoding
     OSStatus status = VTCompressionSessionCreate(NULL, _configuration.videoSize.width, _configuration.videoSize.height, kCMVideoCodecType_H264, NULL, NULL, NULL, VideoCompressonOutputCallback, (__bridge void *)self, &compressionSession);
     if (status != noErr) {
         return;
     }
 
+    // Configure the session
     _currentVideoBitRate = _configuration.videoBitRate;
+    // maximum keyframe interval, i.e. the GOP size
     VTSessionSetProperty(compressionSession, kVTCompressionPropertyKey_MaxKeyFrameInterval, (__bridge CFTypeRef)@(_configuration.videoMaxKeyframeInterval));
     VTSessionSetProperty(compressionSession, kVTCompressionPropertyKey_MaxKeyFrameIntervalDuration, (__bridge CFTypeRef)@(_configuration.videoMaxKeyframeInterval/_configuration.videoFrameRate));
+    // expected frame rate; only a hint used to initialize the session, not the actual FPS
     VTSessionSetProperty(compressionSession, kVTCompressionPropertyKey_ExpectedFrameRate, (__bridge CFTypeRef)@(_configuration.videoFrameRate));
+    // average bitrate; left unset, the encoder defaults to a very low bitrate and the output looks blurry
     VTSessionSetProperty(compressionSession, kVTCompressionPropertyKey_AverageBitRate, (__bridge CFTypeRef)@(_configuration.videoBitRate));
+    // hard data-rate limit: CFArray[CFNumber] of [bytes, seconds, bytes, seconds, ...]
     NSArray *limit = @[@(_configuration.videoBitRate * 1.5/8), @(1)];
     VTSessionSetProperty(compressionSession, kVTCompressionPropertyKey_DataRateLimits, (__bridge CFArrayRef)limit);
+    // real-time encoding output, to lower encoder latency
     VTSessionSetProperty(compressionSession, kVTCompressionPropertyKey_RealTime, kCFBooleanTrue);
+    // H.264 profile; live streaming usually prefers Baseline to avoid B-frame latency, though Main is used here
     VTSessionSetProperty(compressionSession, kVTCompressionPropertyKey_ProfileLevel, kVTProfileLevel_H264_Main_AutoLevel);
+    // allow frame reordering
     VTSessionSetProperty(compressionSession, kVTCompressionPropertyKey_AllowFrameReordering, kCFBooleanTrue);
+    // CABAC entropy coding (this sets the entropy mode, not the codec type)
     VTSessionSetProperty(compressionSession, kVTCompressionPropertyKey_H264EntropyMode, kVTH264EntropyMode_CABAC);
+    // get ready to encode
     VTCompressionSessionPrepareToEncodeFrames(compressionSession);
 }
@@ -99,6 +112,7 @@ - (void)dealloc {
 - (void)encodeVideoData:(CVPixelBufferRef)pixelBuffer timeStamp:(uint64_t)timeStamp {
     if(_isBackGround) return;
     frameCount++;
+    // derive the presentation timestamp from the frame count and FPS
     CMTime presentationTimeStamp = CMTimeMake(frameCount, (int32_t)_configuration.videoFrameRate);
     VTEncodeInfoFlags flags;
     CMTime duration = CMTimeMake(1, (int32_t)_configuration.videoFrameRate);
@@ -108,7 +122,7 @@ - (void)encodeVideoData:(CVPixelBufferRef)pixelBuffer timeStamp:(uint64_t)timeSt
         properties = @{(__bridge NSString *)kVTEncodeFrameOptionKey_ForceKeyFrame: @YES};
     }
     NSNumber *timeNumber = @(timeStamp);
-
+    // start encoding
     OSStatus status = VTCompressionSessionEncodeFrame(compressionSession, pixelBuffer, presentationTimeStamp, duration, (__bridge CFDictionaryRef)properties, (__bridge_retained void *)timeNumber, &flags);
     if(status != noErr){
         [self resetCompressionSession];
@@ -141,6 +155,7 @@ static void VideoCompressonOutputCallback(void *VTref, void *VTFrameRef, OSStatu
     CFDictionaryRef dic = (CFDictionaryRef)CFArrayGetValueAtIndex(array, 0);
     if (!dic) return;
 
+    // check whether the current frame is a keyframe
     BOOL keyframe = !CFDictionaryContainsKey(dic, kCMSampleAttachmentKey_NotSync);
     uint64_t timeStamp = [((__bridge_transfer NSNumber *)VTFrameRef) longLongValue];
 
@@ -149,7 +164,7 @@ static void VideoCompressonOutputCallback(void *VTref, void *VTFrameRef, OSStatu
         return;
     }
 
-    if 
-    (keyframe && !videoEncoder->sps) {
+    if (keyframe && !videoEncoder->sps) { // keyframe, and the SPS (sequence parameter set) has not been captured yet
         CMFormatDescriptionRef format = CMSampleBufferGetFormatDescription(sampleBuffer);
         size_t sparameterSetSize, sparameterSetCount;
@@ -160,9 +175,12 @@ static void VideoCompressonOutputCallback(void *VTref, void *VTFrameRef, OSStatu
             const uint8_t *pparameterSet;
             OSStatus statusCode = CMVideoFormatDescriptionGetH264ParameterSetAtIndex(format, 1, &pparameterSet, &pparameterSetSize, &pparameterSetCount, 0);
             if (statusCode == noErr) {
-                videoEncoder->sps = [NSData dataWithBytes:sparameterSet length:sparameterSetSize];
-                videoEncoder->pps = [NSData dataWithBytes:pparameterSet length:pparameterSetSize];
+                videoEncoder->sps = [NSData dataWithBytes:sparameterSet length:sparameterSetSize]; // capture the SPS
+                videoEncoder->pps = [NSData dataWithBytes:pparameterSet length:pparameterSetSize]; // capture the PPS
+                // The SPS/PPS can be treated as ordinary H.264 NAL units placed at the very front of the stream.
+                // When saving to a file, prepend the 4-byte start code [0 0 0 1] and write them at the head of the .h264 file.
+                // When streaming, put this data into the FLV data section.
                 if (videoEncoder->enabledWriteVideoFile) {
                     NSMutableData *data = [[NSMutableData alloc] init];
                     uint8_t header[] = {0x00, 0x00, 0x00, 0x01};
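
The comment block above describes Annex-B framing for the captured parameter sets. A small sketch of that framing; `annexBParameterSets` is a hypothetical helper, not part of LFLiveKit:

```objc
#import <Foundation/Foundation.h>

// Prefix the SPS and PPS with 4-byte start codes, as the comment above describes.
static NSData *annexBParameterSets(NSData *sps, NSData *pps) {
    static const uint8_t startCode[4] = {0x00, 0x00, 0x00, 0x01};
    NSMutableData *out = [NSMutableData data];
    [out appendBytes:startCode length:4];
    [out appendData:sps];                 // SPS NAL unit
    [out appendBytes:startCode length:4];
    [out appendData:pps];                 // PPS NAL unit
    return out;                           // write this at the head of a raw .h264 file
}
```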
@@ -177,28 +195,31 @@ static void VideoCompressonOutputCallback(void *VTref, void *VTFrameRef, OSStatu
         }
     }
-
+    // fetch the encoded video data
     CMBlockBufferRef dataBuffer = CMSampleBufferGetDataBuffer(sampleBuffer);
     size_t length, totalLength;
     char *dataPointer;
+    // get the data pointer, the length at offset 0, and the total length
     OSStatus statusCodeRet = CMBlockBufferGetDataPointer(dataBuffer, 0, &length, &totalLength, &dataPointer);
     if (statusCodeRet == noErr) {
         size_t bufferOffset = 0;
         static const int AVCCHeaderLength = 4;
+        // iterate over the NAL units in the block buffer
         while (bufferOffset < totalLength - AVCCHeaderLength) {
             // Read the NAL unit length
             uint32_t NALUnitLength = 0;
             memcpy(&NALUnitLength, dataPointer + bufferOffset, AVCCHeaderLength);
-
+            // convert the big-endian length prefix to host byte order; for background on endianness see http://blog.csdn.net/sunjie886/article/details/54944810
             NALUnitLength = CFSwapInt32BigToHost(NALUnitLength);
+            // wrap the NAL unit in an LFVideoFrame for later streaming
             LFVideoFrame *videoFrame = [LFVideoFrame new];
             videoFrame.timestamp = timeStamp;
             videoFrame.data = [[NSData alloc] initWithBytes:(dataPointer + bufferOffset + AVCCHeaderLength) length:NALUnitLength];
             videoFrame.isKeyFrame = keyframe;
             videoFrame.sps = videoEncoder->sps;
             videoFrame.pps = videoEncoder->pps;
-
+            // notify the delegate that a video frame finished encoding
             if (videoEncoder.h264Delegate && [videoEncoder.h264Delegate respondsToSelector:@selector(videoEncoder:videoFrame:)]) {
                 [videoEncoder.h264Delegate videoEncoder:videoEncoder videoFrame:videoFrame];
             }
diff --git a/LFLiveKit/publish/LFStreamRtmpSocket.m b/LFLiveKit/publish/LFStreamRtmpSocket.m
index 373bca5f..ba3f3754 100644
--- a/LFLiveKit/publish/LFStreamRtmpSocket.m
+++ b/LFLiveKit/publish/LFStreamRtmpSocket.m
@@ -491,6 +491,8 @@ - (void)reconnect {
     dispatch_async(dispatch_get_main_queue(), ^{
         /// see https://github.com/LaiFengiOS/LFLiveKit/pull/254/commits/a4cf18e0b9bea6d90a677c5b243991d9b7cf3193
         [self performSelector:@selector(_delayedReconnect) withObject:nil afterDelay:self.reconnectInterval];
+
+//        [self performSelector:@selector(_reconnect) withObject:nil afterDelay:self.reconnectInterval];
     });
     } else if (self.retryTimes4netWorkBreaken >= self.reconnectCount) {
@@ -503,7 +505,7 @@ - (void)reconnect {
         }
     });
 }
-//https://github.com/LaiFengiOS/LFLiveKit/pull/254/commits/a4cf18e0b9bea6d90a677c5b243991d9b7cf3193 // avoid blocking the UI
+//https://github.com/LaiFengiOS/LFLiveKit/pull/254/commits/a4cf18e0b9bea6d90a677c5b243991d9b7cf3193 // avoid blocking the UI  TODO cz
 - (void)_delayedReconnect {
     [NSObject cancelPreviousPerformRequestsWithTarget:self];
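
The `_delayedReconnect` body is cut off after `cancelPreviousPerformRequestsWithTarget:`. Below is a sketch of the delayed-reconnect pattern this hunk points at: schedule on the main queue, where `performSelector:afterDelay:` has a live runloop, then hop to a private queue to do the work. The class name, queue, and reconnect body are assumptions, not LFStreamRtmpSocket's actual implementation:

```objc
#import <Foundation/Foundation.h>

// Hypothetical stand-in for LFStreamRtmpSocket.
@interface ReconnectScheduler : NSObject
@property (nonatomic, strong) dispatch_queue_t socketQueue; // assumed private socket queue
@end

@implementation ReconnectScheduler

- (void)scheduleReconnectAfter:(NSTimeInterval)interval {
    dispatch_async(dispatch_get_main_queue(), ^{
        // performSelector:afterDelay: needs a running runloop, so schedule on the main queue
        [NSObject cancelPreviousPerformRequestsWithTarget:self]; // drop any pending attempt
        [self performSelector:@selector(delayedReconnect)
                   withObject:nil
                   afterDelay:interval];
    });
}

- (void)delayedReconnect {
    dispatch_async(self.socketQueue, ^{
        // reopen the RTMP connection off the main thread so the UI never blocks
        NSLog(@"reconnecting...");
    });
}

@end
```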