AVFoundation - 根据图像生成视频
先给一个可以直接拿过去用的代码,然后我再解释一下.
/// Generates a QuickTime movie of the given duration from a single still image.
/// The image is aspect-fit into `imageSize` and written as repeated H.264 frames
/// at 30 fps.
/// @param imageUrl  File URL of the source image.
/// @param duration  Length of the output movie, in seconds.
/// @param imageSize Bounding size the image is aspect-fit into.
/// @param exportUrl Destination file URL for the movie.
/// @param block     Called when writing finishes successfully. May be NULL.
+ (void)compressImage:(NSURL *)imageUrl
             duration:(CGFloat)duration
                 size:(CGSize)imageSize
            outputUrl:(NSURL *)exportUrl
           completion:(void (^)(void))block {
    // 1. Read the file directly as a CGImage — no intermediate UIImage/NSImage needed.
    CGImageSourceRef src = CGImageSourceCreateWithURL((__bridge CFURLRef)imageUrl, NULL);
    if (!src) {
        return;
    }
    CGImageRef image = CGImageSourceCreateImageAtIndex(src, 0, NULL);
    size_t width = CGImageGetWidth(image);
    size_t height = CGImageGetHeight(image);
    CFRelease(src);

    // 2. Aspect-fit the source dimensions into the requested output size
    //    (returns a centered rect).
    CGRect dstRect = AVMakeRectWithAspectRatioInsideRect(CGSizeMake(width, height),
                                                         CGRectMake(0, 0, imageSize.width, imageSize.height));

    // 3. Convert to a CVPixelBuffer we can feed the writer (see getBufferFromNSImage:).
    NSImage *scaleImage = [[NSImage alloc] initWithCGImage:image size:dstRect.size];
    CGImageRelease(image);  // was leaked in the original
    CVPixelBufferRef buffer = [ExportMovieUtil getBufferFromNSImage:scaleImage];
    if (!buffer) {
        return;
    }

    // 4. AVAssetWriter + pixel-buffer adaptor; write the same frame at 30 fps.
    NSError *writerError = nil;
    AVAssetWriter *videoWriter = [[AVAssetWriter alloc] initWithURL:exportUrl
                                                           fileType:AVFileTypeQuickTimeMovie
                                                              error:&writerError];
    if (!videoWriter) {
        // Original passed error:nil and never checked for failure.
        NSLog(@"could not create writer: %@", writerError);
        CVPixelBufferRelease(buffer);
        return;
    }
    NSDictionary *videoSettings = @{
        AVVideoCodecKey : AVVideoCodecTypeH264,
        AVVideoWidthKey : @((int)dstRect.size.width),
        AVVideoHeightKey : @((int)dstRect.size.height),
    };
    AVAssetWriterInput *writerInput =
        [AVAssetWriterInput assetWriterInputWithMediaType:AVMediaTypeVideo
                                           outputSettings:videoSettings];
    NSDictionary *sourcePixelBufferAttributesDictionary =
        @{(NSString *)kCVPixelBufferPixelFormatTypeKey : @(kCVPixelFormatType_32ARGB)};
    AVAssetWriterInputPixelBufferAdaptor *adaptor =
        [AVAssetWriterInputPixelBufferAdaptor assetWriterInputPixelBufferAdaptorWithAssetWriterInput:writerInput
                                                                         sourcePixelBufferAttributes:sourcePixelBufferAttributesDictionary];
    NSParameterAssert(writerInput);
    NSParameterAssert([videoWriter canAddInput:writerInput]);
    if ([videoWriter canAddInput:writerInput]) {
        [videoWriter addInput:writerInput];
    }
    [videoWriter startWriting];
    [videoWriter startSessionAtSourceTime:kCMTimeZero];

    // Feed frames on a background queue until duration * 30 frames are written.
    dispatch_queue_t dispatchQueue = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
    __block int frame = 0;
    [writerInput requestMediaDataWhenReadyOnQueue:dispatchQueue usingBlock:^{
        while ([writerInput isReadyForMoreMediaData]) {
            if (++frame > duration * 30) {
                [writerInput markAsFinished];
                // Release on every exit path — the original leaked the buffer
                // whenever the writer was no longer in the writing state.
                CVPixelBufferRelease(buffer);
                if (videoWriter.status == AVAssetWriterStatusWriting) {
                    [videoWriter finishWritingWithCompletionHandler:^{
                        if (block) {
                            block();
                        }
                    }];
                }
                break;
            }
            if (![adaptor appendPixelBuffer:buffer withPresentationTime:CMTimeMake(frame, 30)]) {
                NSLog(@"fail");
            } else {
                NSLog(@"success:%ld", (long)frame);
            }
        }
    }];
}
应该能看到我的代码里除了简单的注释还有 1、2、3、4 这样的小注释:
- 1. `CGImageSourceCreateWithURL`:因为这里我需要的是 CGImage,所以就不先生成 UIImage 或者 NSImage 了。
- 2. `AVMakeRectWithAspectRatioInsideRect`:这个 api 真的是提供很多便利。这个 api 可以根据你提供的 size,自动计算当前 size 按比例能缩放成多大,然后返回。返回的 Rect 是一个居中显示的矩形。具体的可以自己试一试,可能我说的比较乱。
- 3. 这是另一个函数了,不是系统的 api,我会在下面 po 出,作用是将 NSImage 转换为 CVPixelBufferRef。
- 4. 这边我们用 AVAssetWriter 结合 AVAssetWriterInputPixelBufferAdaptor 的方式来将图片写成视频,我这里默认写成一秒 30 帧。其实由于是静态图片,不用写得这么密,甚至可以一秒一帧。只要算好时间,算好关键帧,就 ok。
/// Rasterizes an NSImage into a newly created 32ARGB CVPixelBuffer.
/// @param image The image to draw; rendered scaled to its own point size.
/// @return A +1 retained pixel buffer the caller must release, or NULL on failure.
+ (CVPixelBufferRef)getBufferFromNSImage:(NSImage *)image {
    CVPixelBufferRef buffer = NULL;
    size_t width = (size_t)[image size].width;
    size_t height = (size_t)[image size].height;
    size_t bitsPerComponent = 8;  // 8 bits per channel, ARGB

    NSDictionary *options = @{
        (NSString *)kCVPixelBufferCGImageCompatibilityKey : @YES,
        (NSString *)kCVPixelBufferCGBitmapContextCompatibilityKey : @YES,
    };
    // Original ignored the return status; a NULL buffer would crash below.
    CVReturn status = CVPixelBufferCreate(kCFAllocatorDefault, width, height,
                                          kCVPixelFormatType_32ARGB,
                                          (__bridge CFDictionaryRef)options, &buffer);
    if (status != kCVReturnSuccess || buffer == NULL) {
        NSLog(@"could not create pixel buffer");
        return NULL;
    }

    CVPixelBufferLockBaseAddress(buffer, 0);
    void *rasterData = CVPixelBufferGetBaseAddress(buffer);
    size_t bytesPerRow = CVPixelBufferGetBytesPerRow(buffer);

    // Bitmap context that draws straight into the pixel buffer's backing memory.
    CGColorSpaceRef colorSpace = CGColorSpaceCreateWithName(kCGColorSpaceGenericRGB);
    CGContextRef ctxt = CGBitmapContextCreate(rasterData, width, height, bitsPerComponent,
                                              bytesPerRow, colorSpace,
                                              kCGImageAlphaPremultipliedFirst);
    CGColorSpaceRelease(colorSpace);  // was leaked in the original
    if (ctxt == NULL) {
        NSLog(@"could not create context");
        // Original returned here with the buffer still locked and retained.
        CVPixelBufferUnlockBaseAddress(buffer, 0);
        CVPixelBufferRelease(buffer);
        return NULL;
    }

    // Route AppKit drawing into the CG context.
    // NOTE(review): graphicsContextWithGraphicsPort:flipped: is deprecated;
    // graphicsContextWithCGContext:flipped: is the modern equivalent (10.10+).
    NSGraphicsContext *nsctxt = [NSGraphicsContext graphicsContextWithGraphicsPort:ctxt flipped:NO];
    [NSGraphicsContext saveGraphicsState];
    [NSGraphicsContext setCurrentContext:nsctxt];
    [image drawInRect:NSMakeRect(0, 0, width, height)];
    [NSGraphicsContext restoreGraphicsState];

    CVPixelBufferUnlockBaseAddress(buffer, 0);
    CGContextRelease(ctxt);
    return buffer;
}
推荐阅读
- Java|Java OpenCV图像处理之SIFT角点检测详解
- 记录iOS生成分享图片的一些问题,根据UIView生成固定尺寸的分享图片
- 运营是什么()
- ATAN2根据xy坐标计算角度
- ImageLoaders 加载图像
- JAVA图像处理系列(四)——噪声
- 使用交叉点观察器延迟加载图像以提高性能
- 周检视5/14-5/21(第三周)
- Arcgis根据经纬度批量提取属性值
- 2018-11-29|2018-11-29 今早新闻| Chenie