AVAssetWriter not working with audio

I am trying to get audio recorded along with the video in an iOS app. The video is fine, but no audio gets recorded into the file (my iPhone speaker works).

Here is the setup code:

    session = [[AVCaptureSession alloc] init];
    menu->session = session;
    menu_open = NO;
    session.sessionPreset = AVCaptureSessionPresetMedium;
    camera = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
    microphone = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeAudio];
    menu->camera = camera;

    [session beginConfiguration];
    [camera lockForConfiguration:nil];
    if ([camera isExposureModeSupported:AVCaptureExposureModeContinuousAutoExposure]) {
        camera.exposureMode = AVCaptureExposureModeContinuousAutoExposure;
    }
    if ([camera isFocusModeSupported:AVCaptureFocusModeContinuousAutoFocus]) {
        camera.focusMode = AVCaptureFocusModeContinuousAutoFocus;
    }
    if ([camera isWhiteBalanceModeSupported:AVCaptureWhiteBalanceModeContinuousAutoWhiteBalance]) {
        camera.whiteBalanceMode = AVCaptureWhiteBalanceModeContinuousAutoWhiteBalance;
    }
    if ([camera hasTorch]) {
        if ([camera isTorchModeSupported:AVCaptureTorchModeOn]) {
            [camera setTorchMode:AVCaptureTorchModeOn];
        }
    }
    [camera unlockForConfiguration];
    [session commitConfiguration];

    AVCaptureDeviceInput * camera_input = [AVCaptureDeviceInput deviceInputWithDevice:camera error:nil];
    [session addInput:camera_input];
    microphone_input = [[AVCaptureDeviceInput deviceInputWithDevice:microphone error:nil] retain];

    AVCaptureVideoDataOutput * output = [[[AVCaptureVideoDataOutput alloc] init] autorelease];
    output.videoSettings = [NSDictionary dictionaryWithObject:[NSNumber numberWithInt:kCVPixelFormatType_32BGRA]
                                                       forKey:(id)kCVPixelBufferPixelFormatTypeKey];
    [session addOutput:output];
    output.minFrameDuration = CMTimeMake(1, 30);
    dispatch_queue_t queue = dispatch_queue_create("MY QUEUE", NULL);
    [output setSampleBufferDelegate:self queue:queue];
    dispatch_release(queue);

    audio_output = [[[AVCaptureAudioDataOutput alloc] init] retain];
    queue = dispatch_queue_create("MY QUEUE", NULL);
    AudioOutputBufferDelegate * special_delegate = [[[AudioOutputBufferDelegate alloc] init] autorelease];
    special_delegate->normal_delegate = self;
    [special_delegate retain];
    [audio_output setSampleBufferDelegate:special_delegate queue:queue];
    dispatch_release(queue);

    [session startRunning];

Here is where recording is started and stopped:

    if (recording) { // Hence, stop recording
        [video_button setTitle:@"Video" forState:UIControlStateNormal];
        recording = NO;
        [writer_input markAsFinished];
        [audio_writer_input markAsFinished];
        [video_writer endSessionAtSourceTime:CMTimeMakeWithSeconds([[NSDate date] timeIntervalSinceDate:start_time], 30)];
        [video_writer finishWriting];
        UISaveVideoAtPathToSavedPhotosAlbum(temp_url, self, @selector(video:didFinishSavingWithError:contextInfo:), nil);
        [start_time release];
        [temp_url release];
        [av_adaptor release];

        [microphone lockForConfiguration:nil];
        [session beginConfiguration];
        [session removeInput:microphone_input];
        [session removeOutput:audio_output];
        [session commitConfiguration];
        [microphone unlockForConfiguration];
        [menu restateConfigiration];
        [vid_off play];
    } else { // Start recording
        [vid_on play];
        [microphone lockForConfiguration:nil];
        [session beginConfiguration];
        [session addInput:microphone_input];
        [session addOutput:audio_output];
        [session commitConfiguration];
        [microphone unlockForConfiguration];
        [menu restateConfigiration];
        [video_button setTitle:@"Stop" forState:UIControlStateNormal];
        recording = YES;

        NSError *error = nil;
        NSFileManager * file_manager = [[NSFileManager alloc] init];
        temp_url = [[NSString alloc] initWithFormat:@"%@/%@", NSTemporaryDirectory(), @"temp.mp4"];
        [file_manager removeItemAtPath:temp_url error:NULL];
        [file_manager release];

        video_writer = [[AVAssetWriter alloc] initWithURL:[NSURL fileURLWithPath:temp_url]
                                                  fileType:AVFileTypeMPEG4
                                                     error:&error];
        NSDictionary *video_settings = [NSDictionary dictionaryWithObjectsAndKeys:
                                        AVVideoCodecH264, AVVideoCodecKey,
                                        [NSNumber numberWithInt:360], AVVideoWidthKey,
                                        [NSNumber numberWithInt:480], AVVideoHeightKey,
                                        nil];
        writer_input = [[AVAssetWriterInput assetWriterInputWithMediaType:AVMediaTypeVideo
                                                            outputSettings:video_settings] retain];

        AudioChannelLayout acl;
        bzero(&acl, sizeof(acl));
        acl.mChannelLayoutTag = kAudioChannelLayoutTag_Mono;
        audio_writer_input = [[AVAssetWriterInput assetWriterInputWithMediaType:AVMediaTypeAudio
                                                                 outputSettings:[NSDictionary dictionaryWithObjectsAndKeys:
                                                                                 [NSNumber numberWithInt:kAudioFormatMPEG4AAC], AVFormatIDKey,
                                                                                 [NSNumber numberWithInt:1], AVNumberOfChannelsKey,
                                                                                 [NSNumber numberWithFloat:44100.0], AVSampleRateKey,
                                                                                 [NSNumber numberWithInt:64000], AVEncoderBitRateKey,
                                                                                 [NSData dataWithBytes:&acl length:sizeof(acl)], AVChannelLayoutKey,
                                                                                 nil]] retain];
        audio_writer_input.expectsMediaDataInRealTime = YES;

        av_adaptor = [[AVAssetWriterInputPixelBufferAdaptor assetWriterInputPixelBufferAdaptorWithAssetWriterInput:writer_input
                                                                                        sourcePixelBufferAttributes:NULL] retain];
        [video_writer addInput:writer_input];
        [video_writer addInput:audio_writer_input];
        [video_writer startWriting];
        [video_writer startSessionAtSourceTime:CMTimeMake(0, 1)];
        start_time = [[NSDate alloc] init];
    }

Here is the delegate for the audio:

    @implementation AudioOutputBufferDelegate

    - (void)captureOutput:(AVCaptureOutput *)captureOutput
    didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
           fromConnection:(AVCaptureConnection *)connection {
        if (normal_delegate->recording) {
            CMSampleBufferSetOutputPresentationTimeStamp(sampleBuffer,
                CMTimeMakeWithSeconds([[NSDate date] timeIntervalSinceDate:normal_delegate->start_time], 30));
            [normal_delegate->audio_writer_input appendSampleBuffer:sampleBuffer];
        }
    }

    @end

The video method doesn't matter because it works. "restateConfigiration" just sorts out the session configuration, since otherwise the torch turns off, etc.:

    [session beginConfiguration];
    switch (quality) {
        case Low:
            session.sessionPreset = AVCaptureSessionPresetLow;
            break;
        case Medium:
            session.sessionPreset = AVCaptureSessionPreset640x480;
            break;
    }
    [session commitConfiguration];

    [camera lockForConfiguration:nil];
    if ([camera isExposureModeSupported:AVCaptureExposureModeContinuousAutoExposure]) {
        camera.exposureMode = AVCaptureExposureModeContinuousAutoExposure;
    }
    if ([camera isFocusModeSupported:AVCaptureFocusModeContinuousAutoFocus]) {
        camera.focusMode = AVCaptureFocusModeContinuousAutoFocus;
    }
    if ([camera isWhiteBalanceModeSupported:AVCaptureWhiteBalanceModeContinuousAutoWhiteBalance]) {
        camera.whiteBalanceMode = AVCaptureWhiteBalanceModeContinuousAutoWhiteBalance;
    }
    if ([camera hasTorch]) {
        if (torch) {
            if ([camera isTorchModeSupported:AVCaptureTorchModeOn]) {
                [camera setTorchMode:AVCaptureTorchModeOn];
            }
        } else {
            if ([camera isTorchModeSupported:AVCaptureTorchModeOff]) {
                [camera setTorchMode:AVCaptureTorchModeOff];
            }
        }
    }
    [camera unlockForConfiguration];

Thanks for any help.

AVAssetWriter and audio

This may be the same problem as the one mentioned in the linked post. Try commenting out these lines:

    [writer_input markAsFinished];
    [audio_writer_input markAsFinished];
    [video_writer endSessionAtSourceTime:CMTimeMakeWithSeconds([[NSDate date] timeIntervalSinceDate:start_time], 30)];
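If it helps, here is a minimal sketch of what the stop branch might look like with those calls removed, against the same `video_writer` and `temp_url` ivars from the question (a sketch, not a guaranteed fix):

    // Sketch only: stop without marking inputs finished or ending the
    // session explicitly, and let finishWriting close the file.
    recording = NO;
    if ([video_writer finishWriting]) {
        UISaveVideoAtPathToSavedPhotosAlbum(temp_url, self,
            @selector(video:didFinishSavingWithError:contextInfo:), nil);
    } else {
        NSLog(@"finishWriting failed: %@", video_writer.error);
    }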

Edit

I don't know whether the way you are setting the presentation time stamp is necessarily wrong. The way I handle this is with a local variable that is set to 0 at start. Then, when my delegate receives the first packet, I do:

    if (_startTime.value == 0) {
        _startTime = CMSampleBufferGetPresentationTimeStamp(sampleBuffer);
    }

and then

 [bufferWriter->writer startWriting]; [bufferWriter->writer startSessionAtSourceTime:_startTime]; 

Your code looks valid, since you are computing the time difference for every packet you receive. However, AVFoundation computes this for you, and it also optimizes the timestamps for placement in the interleaved container. Another thing I'm not sure about: each CMSampleBufferRef for audio contains more than one data buffer, and each data buffer has its own PTS. I'm not sure whether setting the PTS automatically adjusts all the other data buffers.
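If you want to check that yourself, here is a small sketch that dumps the per-sample timing of an audio CMSampleBufferRef using Core Media; `logAudioSampleTiming` is just an illustrative helper, not part of either code base:

    #import <Foundation/Foundation.h>
    #import <CoreMedia/CoreMedia.h>
    #include <stdlib.h>

    // Illustrative helper: logs how many samples a buffer carries and the
    // presentation time stamp recorded for each one.
    static void logAudioSampleTiming(CMSampleBufferRef sampleBuffer) {
        CMItemCount count = 0;
        // First call asks how many timing entries are needed.
        CMSampleBufferGetSampleTimingInfoArray(sampleBuffer, 0, NULL, &count);
        CMSampleTimingInfo *timing = malloc(sizeof(CMSampleTimingInfo) * count);
        CMSampleBufferGetSampleTimingInfoArray(sampleBuffer, count, timing, &count);
        NSLog(@"samples in buffer: %ld", (long)CMSampleBufferGetNumSamples(sampleBuffer));
        for (CMItemCount i = 0; i < count; i++) {
            CMTimeShow(timing[i].presentationTimeStamp);
        }
        free(timing);
    }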

Where my code differs from yours: I use a single dispatch queue for both audio and video. In the callback I use (some code removed):

 switch (bufferWriter->writer.status) { case AVAssetWriterStatusUnknown: if (_startTime.value == 0) { _startTime = CMSampleBufferGetPresentationTimeStamp(sampleBuffer); } [bufferWriter->writer startWriting]; [bufferWriter->writer startSessionAtSourceTime:_startTime]; //Break if not ready, otherwise fall through. if (bufferWriter->writer.status != AVAssetWriterStatusWriting) { break ; } case AVAssetWriterStatusWriting: if( captureOutput == self.captureManager.audioOutput) { if( !bufferWriter->audioIn.readyForMoreMediaData) { break; } @try { if( ![bufferWriter->audioIn appendSampleBuffer:sampleBuffer] ) { [self delegateMessage:@"Audio Writing Error" withType:ERROR]; } } @catch (NSException *e) { NSLog(@"Audio Exception: %@", [e reason]); } } else if( captureOutput == self.captureManager.videoOutput ) { if( !bufferWriter->videoIn.readyForMoreMediaData) { break;; } @try { if (!frontCamera) { if( ![bufferWriter->videoIn appendSampleBuffer:sampleBuffer] ) { [self delegateMessage:@"Video Writing Error" withType:ERROR]; } } else { CMTime pt = CMSampleBufferGetPresentationTimeStamp(sampleBuffer); flipBuffer(sampleBuffer, pixelBuffer); if( ![bufferWriter->adaptor appendPixelBuffer:pixelBuffer withPresentationTime:pt] ) { [self delegateMessage:@"Video Writing Error" withType:ERROR]; } } } @catch (NSException *e) { NSLog(@"Video Exception Exception: %@", [e reason]); } } break; case AVAssetWriterStatusCompleted: return; case AVAssetWriterStatusFailed: [self delegateMessage:@"Critical Error Writing Queues" withType:ERROR]; bufferWriter->writer_failed = YES ; _broadcastError = YES; [self stopCapture] ; return; case AVAssetWriterStatusCancelled: break; default: break; }