سؤال

In my app I am opening a video preview layer with this code:

    // Create the camera input (default video device, i.e. the back camera).
    // NOTE(review): error:nil silently swallows any failure here — captureInput
    // may be nil (no camera / no permission); consider passing an NSError **.
    AVCaptureDeviceInput *captureInput = [AVCaptureDeviceInput 
                                      deviceInputWithDevice:[AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo] 
                                      error:nil];
/* Set up the video data output. */
AVCaptureVideoDataOutput *captureOutput = [[AVCaptureVideoDataOutput alloc] init];


// Drop frames that arrive while the delegate is still busy processing.
captureOutput.alwaysDiscardsLateVideoFrames = YES; 

// Serial queue on which the sample-buffer delegate callbacks are delivered.
dispatch_queue_t queue;
queue = dispatch_queue_create("cameraQueue", NULL);
[captureOutput setSampleBufferDelegate:self queue:queue];
// NOTE(review): dispatch_release implies pre-ARC (or an old deployment target);
// under ARC with a modern SDK this call must be removed.
dispatch_release(queue);
// Request BGRA pixel buffers so frames can be handed straight to CoreGraphics.
NSString* key = (NSString*)kCVPixelBufferPixelFormatTypeKey; 
NSNumber* value = [NSNumber numberWithUnsignedInt:kCVPixelFormatType_32BGRA]; 
NSDictionary* videoSettings = [NSDictionary dictionaryWithObject:value forKey:key]; 
[captureOutput setVideoSettings:videoSettings]; 

self.captureSession = [[AVCaptureSession alloc] init];
[self.captureSession addInput:captureInput];
[self.captureSession addOutput:captureOutput];
/* Use the medium-quality preset: on the iPhone 4 this demo would lag too much
   at 720p, because the UIImage/CGImage conversion demands too many resources. */
[self.captureSession setSessionPreset:AVCaptureSessionPresetMedium];



// First custom layer, sized/positioned to match viewNo2's frame.
CGRect Vframe;
Vframe = CGRectMake(self.viewNo2.frame.origin.x, self.viewNo2.frame.origin.y, self.viewNo2.frame.size.width, self.viewNo2.frame.size.height);   



/* Add the first custom layer (its orientation must be adjusted so the video
   is displayed correctly). */
self.customLayer = [CALayer layer];
self.customLayer.frame = Vframe;
self.customLayer.contentsGravity = kCAGravityResizeAspect;
[self.view.layer addSublayer:self.customLayer];


// Second custom layer, sized/positioned to match viewNo3's frame.
CGRect VFrame1;
VFrame1 = CGRectMake(self.viewNo3.frame.origin.x, self.viewNo3.frame.origin.y, self.viewNo3.frame.size.width, self.viewNo3.frame.size.height);   

/* Add the second custom layer (same orientation caveat as above). */
self.customLayer1 = [CALayer layer];
self.customLayer1.frame = VFrame1;
self.customLayer1.contentsGravity = kCAGravityResizeAspect;
[self.view.layer addSublayer:self.customLayer1]; 


// An image view was used here previously; kept commented out for reference.
///*We add the imageView*/
//self.imageView = [[UIImageView alloc] init];
//self.imageView.frame = CGRectMake(9, 9, 137, 441);
//[self.view addSubview:self.imageView];
/* Add the live-camera preview layer. */


// Preview layer, sized/positioned to match viewNo1's frame.
CGRect VFrame2;
VFrame2 = CGRectMake(self.viewNo1.frame.origin.x, self.viewNo1.frame.origin.y, self.viewNo1.frame.size.width, self.viewNo1.frame.size.height);   

self.prevLayer = [AVCaptureVideoPreviewLayer layerWithSession: self.captureSession];
self.prevLayer.frame = VFrame2;

self.prevLayer.videoGravity = AVLayerVideoGravityResizeAspectFill;
[self.view.layer addSublayer: self.prevLayer];
/* Start the capture session (begins delivering frames and the live preview). */
[self.captureSession startRunning];

When I am trying to capture the screen with this method:

/// Renders the current view hierarchy into an image and saves it to the
/// user's photo album. Note: layers the compositor draws directly (such as
/// AVCaptureVideoPreviewLayer) are not included by renderInContext:.
- (IBAction)Photo {
    CGRect captureRect = self.view.bounds;
    // Opaque context at the screen's native scale (0.0f = device scale).
    UIGraphicsBeginImageContextWithOptions(captureRect.size, YES, 0.0f);
    [self.view.layer renderInContext:UIGraphicsGetCurrentContext()];
    UIImage *snapshot = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();
    // Fire-and-forget save; no completion callback is registered.
    UIImageWriteToSavedPhotosAlbum(snapshot, nil, nil, nil);
}

the prevLayer isn't being captured, am I missing something?

هل كانت مفيدة؟

المحلول

AVCaptureVideoPreviewLayer is rendered directly by the system's compositor, so its contents are never drawn when you call renderInContext: — which is why it does not appear in the image returned by UIGraphicsGetImageFromCurrentImageContext(). To include the camera picture in your screenshot, you have to tap into the pixel-buffer data delivered by your AVCaptureVideoDataOutput (via the sample-buffer delegate), convert a frame to a UIImage, and composite it onto the screenshot manually.

مرخصة بموجب: CC-BY-SA مع الإسناد
لا تنتمي إلى StackOverflow
scroll top