Example #1
0
def main():
    """Launch extract_info.py inside the 'sticker' docker container.

    Mounts the project root, the output directory and every asset's parent
    directory into the container, builds the `docker run` command line and
    executes it. On a clean exit, optionally forwards the resulting xml to
    Final Cut.
    """
    config = anyfig.setup_config(default_config='DebugConfig')
    joke = meta_utils.get_joke()
    print(f'Here is a programming joke while you wait ;)\n\n{joke}\n')
    prepare_xml(config.xml_file)
    # BUGFIX: removed stray 'qwe' token that raised NameError at runtime.
    asset_paths = xml_utils.get_asset_paths(config.xml_file)

    # Mount each asset's parent directory at the same path inside the
    # container, read-only (`:ro`) so the container can't modify the media.
    # The set de-duplicates assets sharing a parent directory.
    docker_asset_paths = {f'{p.parent}:{p.parent}' for p in asset_paths}
    docker_asset_paths = ' '.join(f'-v {a}:ro' for a in docker_asset_paths)

    if config.bash:
        # Drop into an interactive shell instead of running the script.
        command = '/bin/bash'
    else:
        # Forward this process's CLI arguments to the in-container script.
        command = 'python extract_info.py ' + ' '.join(sys.argv[1:])

    project_root = meta_utils.get_project_root()
    output_volume = f"{project_root/'output'}:/sticker/output"
    args = f"docker run -it --rm --name stick -v {project_root}:/sticker -v {output_volume} {docker_asset_paths} sticker {command}"

    args = args.split()
    args = [a.replace('%20', ' ') for a in args]  # Spaces can be weird in Mac
    print(args)

    completed = subprocess.run(args)

    # Only hand the xml to Final Cut if docker exited cleanly.
    if completed.returncode == 0 and config.send_to_finalcut:
        xml_outpath = Path('output') / config.xml_file.name
        meta_utils.send_xml_to_finalcut(xml_outpath)
def main():
    """Transcribe the assets of an fcpxml file and write an enriched copy.

    Each asset referenced by the xml is uploaded to cloud storage, run
    through speech recognition to extract editing actions, and the actions
    are merged back into the xml, which is saved under output/. The result
    is optionally forwarded to Final Cut.
    """
    ffmpeg_utils.assert_installed()
    joke_text = meta_utils.get_joke()
    print(f'A programming joke while you wait ;)\n\n{joke_text}\n')

    config = anyfig.setup_config(default_config='DevConfig')

    err_msg = "You have to specify an input file in the config.py file or send it as an input parameter to the program as such --xml_file=path/to/file.fcpxml"
    assert config.xml_file != '', err_msg

    if config.clear_outdir:
        meta_utils.clear_outdir()

    # When running inside docker the host filesystem is mounted under
    # /host_root, so absolute host paths must be re-rooted there.
    path_base = '/host_root' if config.using_docker and not config.test_install else ''

    xml_path = Path(config.xml_file)
    if xml_path.is_absolute():
        # Strip the leading '/' so the join keeps the host path intact.
        xml_path = Path(path_base) / str(xml_path)[1:]

    recognizer = config.recognizer
    analyzed_metadatum = []
    for asset in get_asset_files(xml_path):
        asset_path = Path(path_base) / asset['src']

        # Push the asset to cloud storage so the recognizer can reach it.
        data, cloud_file_name = recognizer.prepare_data(
            asset_path, config.google_bucket_name, config.unique_cloud_file_id)

        # Speech -> text -> actions
        actions = recognizer.find_actions(data, config.fake_data)

        if config.delete_cloud_file:
            print(f"Deleting file {cloud_file_name} from Google storage...")
            google_utils.delete_blob(config.google_bucket_name,
                                     cloud_file_name)

        analyzed_metadatum.append(dict(id=asset['id'], actions=actions))

    # Merge the recognized actions into the xml and save it under output/.
    xml_tree = edit_xml.main(xml_path, analyzed_metadatum)
    out_dir = meta_utils.get_project_root() / 'output'
    xml_outpath = out_dir / f'enriched_{xml_path.name}'
    xml_utils.save_xml(xml_tree, str(xml_outpath))

    if config.send_to_finalcut and not config.using_docker:
        meta_utils.send_xml_to_finalcut(xml_outpath)
    elif config.send_to_finalcut:
        # Can't reach Final Cut from inside docker; exit with 42 —
        # NOTE(review): presumably the host-side wrapper checks this code.
        sys.exit(42)
def main():
    """Detect QR codes in a video and split it at the detected frames."""
    config = anyfig.setup_config(default_config='Config')
    print(config)
    input_file = config.input_file
    assert input_file.exists(), f'Wrong path to video: {input_file}'

    # Raw detections -> merged within a window -> frame-level actions.
    detections = find_detections(config)
    detections = squeeze_detections(detections,
                                    config.detection_squeeze_window)
    action_frames = format_detections(detections, config)
    print(f"Actions: {action_frames}")

    split_info = split_video(action_frames, input_file, config.fps)
    save_split_info(split_info, input_file)

    # Release OpenCV resources.
    cv2.waitKey(1)
    config.video_cap.release()
    cv2.destroyAllWindows()
def main():
  """Extract speech actions from xml assets and hand them to edit_xml."""
  config = anyfig.setup_config(default_config='DebugConfig')
  if config.clear_outdir:
    meta_utils.clear_outdir()

  print(config)
  recognizer = config.recognizer

  results = []
  for asset in get_asset_files(config.xml_file):
    # Push the asset to cloud storage so the recognizer can reach it.
    data, cloud_file_name = recognizer.prepare_data(
      asset['src'], config.google_bucket_name, config.unique_cloud_file_id)

    # Speech -> text -> actions
    actions = recognizer.find_actions(data, config.fake_data)

    if config.delete_cloud_file:
      print(f"Deleting file {cloud_file_name} from Google storage...")
      google_utils.delete_blob(config.google_bucket_name, cloud_file_name)

    results.append(dict(id=asset['id'], actions=actions))

  edit_xml.main(config.xml_file, results, config.send_to_finalcut)
Example #5
0
            optimizer.zero_grad()

            # Decrease learning rate
            lr_scheduler.step()


def setup_dataloaders(config):
    """Build and return the (train, val) dataloader pair for *config*."""
    return get_trainloader(config), get_valloader(config)


def setup_train(config):
    """Assemble everything a training run needs from *config*.

    Returns a (model, optimizer, lr_scheduler, logger, validator) tuple.
    """
    model = get_model(config)
    optimizer = torch.optim.Adam(model.parameters(), lr=config.start_lr)
    # Cosine-anneal the learning rate from start_lr down to end_lr
    # over optim_steps optimizer steps.
    lr_scheduler = CosineAnnealingLR(
        optimizer, T_max=config.optim_steps, eta_min=config.end_lr)
    logger = Logger(config)
    validator = Validator(config, logger)
    return model, optimizer, lr_scheduler, logger, validator


if __name__ == '__main__':
    # Load the run configuration (defaults to the 'Cookie' config class).
    config = anyfig.setup_config(default_config='Cookie')
    print(config)
    print('\n{}\n'.format(config.save_comment))
    # Seed RNGs for reproducibility before any stochastic work starts.
    seed_program(config.seed)
    train(config)
def main():
    """Load the 'Train' configuration and echo it to stdout."""
    run_config = anyfig.setup_config(default_config='Train')
    print(run_config)
Example #7
0
            labels = labels.to(model.device)

            # Validation
            # if optim_steps % val_freq == 0:
            #   validator.validate(model, dataloaders.val, optim_steps)

            # Forward pass
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = loss_fn(outputs, labels)

            # Backward pass
            loss.backward()
            optimizer.step()
            optim_steps += 1

            # Decrease learning rate
            lr_scheduler.step()

            # Log
            accuracy = metrics['accuracy'](outputs, labels)
            logger.log_accuracy(accuracy.item(), optim_steps)


if __name__ == '__main__':
    # Load the run configuration (defaults to the TrainLaptop config class).
    config = anyfig.setup_config(default_config=configs.TrainLaptop)
    print(config)  # Remove if you don't want to see the config at start
    print('\n{}\n'.format(config.misc.save_comment))
    # One-time environment setup driven by the misc sub-config —
    # NOTE(review): setup_utils not visible here; presumably seeds/dirs.
    setup_utils.setup(config.misc)
    train(config)