# Source code for cgl.plugins.otio.tools.fcp.fcp_extract_shots

import inspect
import os
from collections import OrderedDict

import opentimelineio as otio

from cgl.plugins.otio.tools import extract_shots
from cgl.plugins.otio.tools.fcp import fcp_media_linker


def find_adapter_source(adapter_name):
    """Print where the OTIO adapter named *adapter_name* is implemented.

    Best-effort debug helper: prints the adapter's module, its source file
    (when the module exposes ``__file__``), and any matching plugin entry
    from the OTIO plugin manifest. All failures are caught and printed
    rather than raised.

    Args:
        adapter_name: registered OTIO adapter name, e.g. "fcp_xml_nested".
    """
    print(f"=== FINDING SOURCE FOR {adapter_name} ===")
    # Get the adapter module directly
    try:
        adapter_module = otio.adapters.from_name(adapter_name).module()
        print(f"Adapter module: {adapter_module}")
        # Get the file path of the module
        if hasattr(adapter_module, "__file__"):
            print(f"Source file: {adapter_module.__file__}")
        else:
            print("No __file__ attribute found")
        # Try to get more details from the plugin manifest
        manifest = otio.plugins.manifest()
        for plugin in manifest.plugins:
            if hasattr(plugin, "name") and plugin.name == adapter_name:
                print(f"Plugin details: {plugin}")
                if hasattr(plugin, "filepath"):
                    print(f"Plugin filepath: {plugin.filepath}")
    except Exception as e:
        # Debug helper only: report and continue rather than propagate.
        print(f"Error: {e}")
# Also check the actual adapter object
def inspect_adapter(adapter_name):
    """Print debugging details for the OTIO adapter *adapter_name*.

    Prints the adapter object, its class and module name, and — when it can
    be resolved — the path of the source file implementing it. Errors are
    caught and printed rather than raised (best-effort debug helper).

    Fix: the original called ``inspect.getmodule`` without ``inspect`` ever
    being imported, so that line always raised NameError, which the broad
    ``except`` swallowed and reported as an inspection error. ``inspect`` is
    now imported at module level.

    Args:
        adapter_name: registered OTIO adapter name, e.g. "fcp_xml_nested".
    """
    print(f"\n=== INSPECTING {adapter_name} ADAPTER ===")
    try:
        adapter = otio.adapters.from_name(adapter_name)
        print(f"Adapter: {adapter}")
        print(f"Adapter class: {adapter.__class__}")
        print(f"Adapter module: {adapter.__module__}")
        # Try to find the source file
        module = inspect.getmodule(adapter)
        if module and hasattr(module, "__file__"):
            print(f"Module file: {module.__file__}")
    except Exception as e:
        print(f"Error inspecting adapter: {e}")
# # Run both # find_adapter_source("fcp_xml_nested") # inspect_adapter("fcp_xml_nested") SHOT_LABEL_COLOR = "Blue"
def is_shot(item):
    """Decide whether *item* is a shot, returning its parsed shot name.

    An item counts as a shot when its fcp_xml "label2" metadata equals
    SHOT_LABEL_COLOR. Returns False when the label is absent/different or the
    item has no usable metadata; otherwise returns whatever
    ``extract_shots.parse_shot_name`` yields (callers treat the result as a
    boolean, so a falsy parse result also means "not a shot").
    """
    try:
        label = (
            item.metadata
            .get("fcp_xml", {})
            .get("labels", {})
            .get("label2", None)
        )
    except AttributeError:
        # No metadata mapping on this item at all.
        return False
    if label == SHOT_LABEL_COLOR:
        return extract_shots.parse_shot_name(item.name)
    return False
def scale_rational(value, new_rate=24):
    """Convert a RationalTime to *new_rate* fps, rounding to a whole frame.

    Returns *value* unchanged when it is already at *new_rate*; otherwise
    rescales and rounds the value to the nearest integer at the new rate.
    """
    if value.rate == new_rate:
        return value
    rescaled = value.rescaled_to(new_rate)
    # Snap to an integer frame count after rescaling.
    return otio.opentime.RationalTime(round(rescaled.value), rescaled.rate)
def scale_ranges(in_range, new_rate=24):
    """Return *in_range* with start_time and duration converted to *new_rate* fps.

    Args:
        in_range: TimeRange to convert; must be a valid (truthy) range.
        new_rate: target frame rate (default 24).

    Raises:
        ValueError: if *in_range* is falsy (e.g. None). The original used a
            bare ``assert``, which is silently stripped under ``python -O``,
            letting a None range fall through to an AttributeError later.
    """
    if not in_range:
        raise ValueError(f"scale_ranges requires a TimeRange, got: {in_range!r}")
    start_time = scale_rational(in_range.start_time, new_rate)
    duration = scale_rational(in_range.duration, new_rate)
    return otio.opentime.TimeRange(start_time, duration)
def check_rate(source_range, rate):
    """Raise ValueError unless both start_time and duration are at *rate* fps."""
    rates = (source_range.start_time.rate, source_range.duration.rate)
    if any(r != rate for r in rates):
        raise ValueError(f"Timeline rate not {rate} fps: {source_range}")
def parse_clip(clip):
    """Copy *clip* as a metadata-free 24fps clip, or substitute a Gap.

    Returns:
        (new_item, target_url): ``new_item`` is a cleaned deep copy when
        *clip* is an ``otio.schema.Clip`` whose media resolves to a target
        URL, otherwise a Gap carrying the original name and (rescaled)
        source_range; ``target_url`` is the media path or None.

    Fix: the original assigned ``scale_ranges(...)`` to
    ``new_clip.media_reference.available_range`` and then unconditionally
    overwrote it with ``new_range`` before it was ever read — that dead
    store (and its duplicate ``scale_ranges`` call) is removed.
    """
    if isinstance(clip, otio.schema.Clip):
        target_url = extract_shots.get_target_url(clip)
        if target_url:
            new_clip = clip.deepcopy()
            new_clip.metadata.clear()
            new_clip.source_range = scale_ranges(clip.source_range)
            _, ext = os.path.splitext(target_url)
            if ext.lower() in (".psd", ".png", ".jpg"):
                # Still images may have no available_range; fall back to an
                # empty 24fps range before rescaling.
                default_range = otio.opentime.TimeRange(
                    otio.opentime.RationalTime(0, 24),
                    otio.opentime.RationalTime(0, 24),
                )
                available_range = scale_ranges(
                    new_clip.media_reference.available_range or default_range
                )
                # Pad stills with 720 frames of head room (and at least 720 of
                # tail) — presumably 30s handles at 24fps; TODO confirm intent.
                new_duration = max(
                    720 * 2, 720 + available_range.duration.to_frames()
                )
                new_range = otio.opentime.TimeRange(
                    otio.opentime.RationalTime(0, 24),
                    otio.opentime.RationalTime(new_duration, 24),
                )
                new_clip.media_reference.available_range = new_range
                # Shift the source range so playback starts after the head pad.
                new_source_range = otio.opentime.TimeRange(
                    otio.opentime.RationalTime(720, 24),
                    new_clip.source_range.duration,
                )
                new_clip.source_range = new_source_range
            return new_clip, target_url
    # Not a clip with resolvable media: stand in a Gap of the same extent.
    gap = otio.schema.Gap()
    gap.name = clip.name
    gap.source_range = scale_ranges(clip.source_range)
    return gap, None
def extend_timerange(timerange, head, tail):
    """Return a new TimeRange with *head* added to the start value and *tail*
    added to the duration value (rates are preserved unchanged)."""
    start = timerange.start_time
    dur = timerange.duration
    return otio.opentime.TimeRange(
        otio.opentime.RationalTime(start.value + head, start.rate),
        otio.opentime.RationalTime(dur.value + tail, dur.rate),
    )
def get_shot_track(item, abs_frame):
    """Build a per-shot Track for *item* plus a description of its media.

    The returned track is the item's rescaled source_range extended by
    ``extract_shots.HANDLES`` frames, bracketed by handle Gaps. *item* must be
    either a Clip or a Stack (nested sequence); only the first Video track of
    a Stack is read.

    Args:
        item: otio Clip or Stack flagged as a shot.
        abs_frame: absolute timeline frame where the shot begins.

    Returns:
        (shot_track, source_media): ``source_media`` is a list of
        ``[target_url, start_frame, timeline_position]`` entries for the Clip
        case and ``[target_url, start_frame, timeline_position, duration]``
        for the Stack case. NOTE(review): the Clip entries lack the trailing
        duration element — confirm downstream consumers expect this.

    Raises:
        ValueError: when *item* is neither a Clip nor a Stack.

    Fix: removed a dead local (``duration``) computed but never used in the
    Clip branch of the original.
    """
    shot_track = otio.schema.Track()
    shot_track.source_range = scale_ranges(item.source_range)
    # Grow the head of the track by the handle length.
    shot_track.source_range = extend_timerange(
        shot_track.source_range, extract_shots.HANDLES, 0
    )
    shot_track.name = item.name
    handle_gap = otio.schema.Gap()
    handle_gap.source_range = otio.opentime.TimeRange(
        duration=otio.opentime.RationalTime(extract_shots.HANDLES, 24)
    )
    shot_track.append(handle_gap.deepcopy())

    if isinstance(item, otio.schema.Clip):
        parsed_clip, target_url = parse_clip(item)
        shot_track.append(parsed_clip)
        shot_track.append(handle_gap.deepcopy())
        start_frame = parsed_clip.range_in_parent().start_time.value
        source_media = (
            [[target_url, start_frame, start_frame + abs_frame - extract_shots.HANDLES]]
            if target_url
            else []
        )
        return shot_track, source_media

    if not isinstance(item, otio.schema.Stack):
        raise ValueError(f"Unknown Nested Clip Sub-Component type: {type(item)}")

    source_media = []
    for child in item.find_children(shallow_search=True):
        if not isinstance(child, otio.schema.Track):
            continue
        if child.kind != otio.schema.TrackKind.Video:
            continue
        # logging.info(child)
        clips = child.find_children(shallow_search=True)
        if len(clips) == 0:
            continue
        for clip in clips:
            parsed_clip, target_url = parse_clip(clip)
            parsed_clip.source_range = scale_ranges(parsed_clip.source_range)
            shot_track.append(parsed_clip)
            if target_url:
                start_frame = parsed_clip.range_in_parent().start_time.value
                duration = parsed_clip.source_range.duration.value
                timeline_position = start_frame + abs_frame - extract_shots.HANDLES
                source_media.append(
                    [
                        target_url,
                        start_frame,
                        timeline_position,
                        duration,
                    ]
                )
        # only one track allowed
        break
    shot_track.append(handle_gap.deepcopy())
    return shot_track, source_media
def simplify_timeline(xml_path):
    """Read an FCP XML at *xml_path* and flatten it into a shot-per-track timeline.

    Loads the XML with the "fcp_xml_nested" adapter and the project media
    linker, then walks the first video track: every item flagged by
    ``is_shot`` becomes a numbered shot track (0010, 0020, ...) on a new main
    track; transitions are copied with rescaled offsets; everything else
    becomes a Gap of the same (rescaled) extent.

    Args:
        xml_path: path to the FCP XML file.

    Returns:
        (new_timeline, shot_dict): the simplified otio Timeline and an
        OrderedDict mapping shot name -> source media entries (as produced
        by ``get_shot_track``).

    Raises:
        ValueError: if the timeline name yields no sequence name, a shot has
            no source media, a shot name repeats, or a shot's rate is not 24.
    """
    fcp_media_linker.register_media_linker()
    timeline = otio.adapters.read_from_file(
        xml_path, adapter_name="fcp_xml_nested", media_linker_name="fcp_media_linker"
    )
    new_timeline = otio.schema.Timeline()
    new_timeline.global_start_time = scale_rational(timeline.global_start_time)
    sequence_name = extract_shots.parse_sequence_name(timeline.name)
    if not sequence_name:
        raise ValueError(
            f"Unable to parse sequence/scene name for timeline: {timeline.name}"
        )
    new_timeline.name = sequence_name
    main_track = otio.schema.Track()
    shot_dict = OrderedDict()
    i = 0  # running shot counter; shot names are i*10 zero-padded to 4 digits
    for track in timeline.video_tracks():
        new_track = otio.schema.Track(kind=track.kind)
        new_timeline.tracks.append(new_track)
        for item in track:
            if is_shot(item):
                i += 1
                # item.name = extract_shots.clean_name(item.name)
                item.name = f"{i*10:04d}"
                check_rate(item.source_range, 24.0)
                range_in_parent = scale_ranges(item.range_in_parent())
                # Absolute frame of the shot on the timeline.
                abs_frame = (
                    new_timeline.global_start_time.value
                    + range_in_parent.start_time.value
                )
                shot_track, source_media = get_shot_track(item, abs_frame)
                main_track.append(shot_track)
                if not source_media:
                    raise ValueError(f"Could not find source media for: {item.name}")
                if item.name in shot_dict:
                    raise ValueError(f"Duplicate shot detected: {item.name}")
                shot_dict[item.name] = source_media
            else:
                if isinstance(item, otio.schema.Transition):
                    # Transitions survive, with offsets rescaled to 24fps.
                    t = item.deepcopy()
                    t.in_offset = scale_rational(t.in_offset)
                    t.out_offset = scale_rational(t.out_offset)
                    main_track.append(t)
                else:
                    # check_rate(item.source_range, 24.0)
                    # Non-shot items are replaced by Gaps of the same extent.
                    gap = otio.schema.Gap()
                    gap.name = item.name
                    gap.source_range = scale_ranges(item.source_range)
                    main_track.append(gap)
        # only read first track
        break
    new_timeline.tracks.append(main_track)
    return new_timeline, shot_dict
if __name__ == "__main__":
    # Ad-hoc smoke test against a local export (hard-coded developer path).
    # Fix: removed an unused ``import sys`` from the original block.
    xml_path = r"E:\Alchemy\jhcs\ttas\VERSIONS\0\000\render\shots\103\s01\SEQ\edt\default\tom\000.000\high\mdc_103.xml"
    new_timeline, shot_dict = simplify_timeline(xml_path)
    for key, values in shot_dict.items():
        print(key, values)
    # print(parse_shot_name("21Aaz_0011"))
    # print(parse_sequence_name("Scene 16B"))
    # simplify_timeline(r"D:/Projects/Premise/Scene 16B_name_fix2.xml")