In Swift for iOS, I have an array of `AVURLAsset`. I pass it to a function that stitches/merges the video assets together into one final video. For each video, my goal is to overlay text centered in the frame.

When I play the outputted video, the video assets merge correctly, but I can't figure out why none of the text overlays appear. I tried following an existing answer, but to no avail. Any guidance would be extremely appreciated.
func merge(videos: [AVURLAsset], completion: @escaping (_ url: URL, _ asset: AVAssetExportSession) -> ()) {
    let videoComposition = AVMutableComposition()
    var lastTime: CMTime = .zero
    var count = 0
    var maxVideoSize = CGSize.zero // For tracking the maximum video size

    guard let videoCompositionTrack = videoComposition.addMutableTrack(withMediaType: .video, preferredTrackID: Int32(kCMPersistentTrackID_Invalid)) else { return }
    guard let audioCompositionTrack = videoComposition.addMutableTrack(withMediaType: .audio, preferredTrackID: Int32(kCMPersistentTrackID_Invalid)) else { return }

    let mainComposition = AVMutableVideoComposition()
    var parentLayers = [CALayer]() // To hold all individual parent layers

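    // Note: the `[safe: 0]` subscript used below is not part of the Swift standard
    // library; it is assumed to be a custom bounds-checked Array extension that
    // returns nil when the asset has no track of the requested type.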
    for video in videos {
        if let videoTrack = video.tracks(withMediaType: .video)[safe: 0] {
            // Reassigned on every pass, so the composition track ends up with the
            // transform of the last clip
            videoCompositionTrack.preferredTransform = videoTrack.preferredTransform
            do {
                try videoCompositionTrack.insertTimeRange(CMTimeRangeMake(start: .zero, duration: video.duration), of: videoTrack, at: lastTime)
                if let audioTrack = video.tracks(withMediaType: .audio)[safe: 0] {
                    try audioCompositionTrack.insertTimeRange(CMTimeRangeMake(start: .zero, duration: video.duration), of: audioTrack, at: lastTime)
                }
                lastTime = CMTimeAdd(lastTime, video.duration)
                // Obtain the video dimensions and update the max size if necessary
                let videoSize = videoTrack.naturalSize.applying(videoTrack.preferredTransform)
                let videoRect = CGRect(x: 0, y: 0, width: abs(videoSize.width), height: abs(videoSize.height))
                if videoRect.width > maxVideoSize.width {
                    maxVideoSize.width = videoRect.width
                }
                if videoRect.height > maxVideoSize.height {
                    maxVideoSize.height = videoRect.height
                }
                // Create and configure the text layer for this segment
                let textLayer = CATextLayer()
                textLayer.string = "TESTING"
                textLayer.foregroundColor = UIColor.white.cgColor
                textLayer.backgroundColor = UIColor.clear.cgColor
                textLayer.fontSize = 100
                textLayer.shadowOpacity = 0.5
                textLayer.alignmentMode = .center
                textLayer.contentsScale = UIScreen.main.scale // Ensures the text is sharp
                textLayer.isWrapped = true // Allows text wrapping if needed
                // Calculate the frame for centrally aligned text
                let textHeight: CGFloat = 120 // Adjust as needed
                let textWidth: CGFloat = videoRect.width
                let xPos = (videoRect.width - textWidth) / 2
                let yPos = (videoRect.height - textHeight) / 2
                textLayer.frame = CGRect(x: xPos, y: yPos, width: textWidth, height: textHeight)
                print(textLayer.frame)
                // Create a parent layer holding the video and text layers
                let parentLayer = CALayer()
                let videoLayer = CALayer()
                parentLayer.frame = videoRect
                videoLayer.frame = videoRect
                textLayer.zPosition = 1 // Ensures the text layer is on top
                parentLayer.addSublayer(videoLayer)
                parentLayer.addSublayer(textLayer)
                parentLayers.append(parentLayer) // Add to the array
                // Add an instruction for this segment to the video composition
                let videoCompositionInstruction = AVMutableVideoCompositionInstruction()
                videoCompositionInstruction.timeRange = CMTimeRangeMake(start: .zero, duration: video.duration)
                let layerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: videoTrack)
                videoCompositionInstruction.layerInstructions = [layerInstruction]
                mainComposition.instructions.append(videoCompositionInstruction)

                count += 1
            } catch {
                print("Failed to insert track")
                return
            }
        }
    }
    let mainParentLayer = CALayer()
    mainParentLayer.frame = CGRect(x: 0, y: 0, width: maxVideoSize.width, height: maxVideoSize.height)
    for layer in parentLayers {
        mainParentLayer.addSublayer(layer)
    }

    // Set the renderSize and frameDuration of the mainComposition
    mainComposition.renderSize = maxVideoSize
    mainComposition.frameDuration = CMTime(value: 1, timescale: 30) // Assuming 30 fps
    mainComposition.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: mainParentLayer, in: mainParentLayer)
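    // Note: `init(postProcessingAsVideoLayer:in:)` takes the layer that video
    // frames are rendered into as its first argument and the containing animation
    // layer as its second; here the same layer is passed for both.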

    let outputUrl = NSURL.fileURL(withPath: NSTemporaryDirectory() + "mergedVid" + ".mp4")
    guard let exporter = AVAssetExportSession(asset: videoComposition, presetName: AVAssetExportPresetHighestQuality) else { return }
    exporter.videoComposition = mainComposition
    exporter.outputURL = outputUrl
    exporter.outputFileType = .mp4
    exporter.shouldOptimizeForNetworkUse = true
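    // Note: AVAssetExportSession fails if a file already exists at outputURL, so
    // any leftover file from a previous run should be removed before exporting.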
    exporter.exportAsynchronously {
        DispatchQueue.main.async {
            if let outputUrl = exporter.outputURL, exporter.status == .completed {
                completion(outputUrl, exporter)
            } else if let error = exporter.error {
                print("Export failed: \(error.localizedDescription)")
            }
        }
    }
    play(video: exporter.asset)
}
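
For reference, a minimal sketch of how the function might be invoked, assuming the same AVFoundation context as above; the bundle file names `clip1`/`clip2` are placeholders, not part of the original code:

// Hypothetical call site; the bundle file names are placeholders.
let urls = ["clip1", "clip2"].compactMap {
    Bundle.main.url(forResource: $0, withExtension: "mp4")
}
let assets = urls.map { AVURLAsset(url: $0) }
merge(videos: assets) { url, session in
    print("Merged video exported to \(url)")
}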