Example 1
def post_train_detector_job(dataset_name,
                            run_name,
                            epochs,
                            import_datasets=""):
    dataset_name = quote(dataset_name)
    run_name = quote(run_name)
    rc = RunConfig(dataset_name, run_name)
    dc = DatasetConfig(dataset_name)
    if rc.exists and dc.exists:
        cmd = [
            python_path, "training_script.py",
            "--name={}".format(dataset_name),
            "--experiment={}".format(run_name),
            "--input_shape={}".format(rc.get('detector_resolution')),
            "--train_data_dir=fjlfbwjefrlbwelrfb_man_we_need_a_better_detector_codebase",
            "--batch_size={}".format(rc.get('detection_training_batch_size')),
            "--image_shape={}".format(dc.get('video_resolution')),
            "--epochs={}".format(epochs)
        ]

        if import_datasets:
            import_datasets = quote(import_datasets)
            cmd.append("--import_datasets={}".format(import_datasets))

        job_id = jm.run(cmd, "train_detector")
        if job_id:
            return (job_id, 202)
        else:
            return (NoContent, 503)
    else:
        return (NoContent, 404)
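The dataset and run names above are sanitised with quote() before being formatted into the job command. Assuming quote is urllib.parse.quote (the usual import for this pattern; the import is not shown in the snippet), a quick illustration of what that sanitisation does:

from urllib.parse import quote

print(quote("night traffic"))   # night%20traffic  (spaces are percent-encoded)
print(quote("run#3"))           # run%233          ('#' and similar characters cannot leak into paths or flags)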
Example 2
def main():
    parser = ArgumentParser(description="Run Gate APIv4 demo application")
    parser.add_argument("-k", "--key", required=True, help="Gate APIv4 Key")
    parser.add_argument("-s",
                        "--secret",
                        required=True,
                        help="Gate APIv4 Secret")
    parser.add_argument("-u",
                        "--url",
                        required=False,
                        help="API base URL used to test")
    parser.add_argument("tests", nargs='+', help="tests to run")
    options = parser.parse_args()

    host_used = options.url
    if not host_used:
        host_used = "https://api.gateio.ws/api/v4"
    if not host_used.startswith("http"):
        host_used = "https://" + host_used
    host_used = host_used.rstrip("/")
    if not host_used.endswith("/api/v4"):
        host_used += '/api/v4'

    run_config = RunConfig(options.key, options.secret, host_used)
    for t in options.tests:
        logger.info("run %s API demo", t)
        if t == 'spot':
            spot_demo(run_config)
        elif t == 'margin':
            margin_demo(run_config)
        elif t == 'futures':
            futures_demo(run_config)
        else:
            logger.warning("ignore unknown test %s", t)
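The host normalisation in main() is self-contained enough to factor out; below is a minimal sketch under the same rules (normalize_host is a hypothetical name, not part of the demo code):

def normalize_host(url, default="https://api.gateio.ws/api/v4"):
    # Hypothetical helper mirroring the normalisation done in main() above.
    host = url or default
    if not host.startswith("http"):
        host = "https://" + host      # assume HTTPS when no scheme is given
    host = host.rstrip("/")
    if not host.endswith("/api/v4"):
        host += "/api/v4"             # ensure the API version prefix is present
    return host

assert normalize_host(None) == "https://api.gateio.ws/api/v4"
assert normalize_host("fx-api.gateio.ws") == "https://fx-api.gateio.ws/api/v4"
assert normalize_host("https://api.gateio.ws/api/v4/") == "https://api.gateio.ws/api/v4"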
Example 3
def get_run_config(dataset_name, run_name):
    dataset_name = quote(dataset_name)
    rc = RunConfig(dataset_name, run_name)
    if rc.exists:
        return rc.get_data()
    else:
        return (NoContent, 404)
Example 4
def post_run_config(dataset_name, run_name, run_config):
    if ' ' in run_name:
        return ("Spaces are not allowed in run names!", 500)

    dataset_name = quote(dataset_name)
    run_name = quote(run_name)
    rc = RunConfig(dataset_name, run_name)
    if rc.set_data(run_config):
        rc.save()
        return (NoContent, 200)
    else:
        return ("Could not interpret run configuration. Is some required parameter missing?", 500)
Example 5
def post_detect_objects_job(dataset_name, run_name):
    dataset_name = quote(dataset_name)
    run_name = quote(run_name)
    rc = RunConfig(dataset_name, run_name)
    if rc.exists:
        cmd = [python_path, "detect_csv.py",
               "--dataset={}".format(dataset_name),
               "--run={}".format(run_name),
               "--res={}".format(rc.get("detector_resolution")),
               "--conf={}".format(rc.get("confidence_threshold")),
               "--bs={}".format(rc.get("detection_batch_size"))]

        job_id = jm.run(cmd, "detect_objects")
        if job_id:
            return (job_id, 202)
        else:
            return (NoContent, 503)
    else:
        return (NoContent, 404)
Example 6
def post_tracking_world_coordinates_job(dataset_name, run_name, confidence_threshold, make_videos):
    dataset_name = quote(dataset_name)
    run_name = quote(run_name)
    rc = RunConfig(dataset_name, run_name)
    dc = DatasetConfig(dataset_name)
    if rc.exists and dc.exists:
        cmd = [python_path, "tracking_world.py",
               "--cmd=findvids",
               "--dataset={}".format(dataset_name),
               "--run={}".format(run_name),
               "--conf={}".format(confidence_threshold),
               "--make_videos={}".format(make_videos)]

        job_id = jm.run(cmd, "tracking_world_coordinates")
        if job_id:
            return (job_id, 202)
        else:
            return (NoContent, 503)
    else:
        return (NoContent, 404)
Example 7
def post_optimize_tracking_world_coordinates_job(
        csv_ground_truth_file, dataset_name, run_name, date, detection_id,
        class_name_conversion, visualize, patience, per_iteration):
    dataset_name = quote(dataset_name)
    run_name = quote(run_name)

    rc = RunConfig(dataset_name, run_name)
    dc = DatasetConfig(dataset_name)
    if rc.exists and dc.exists:
        this_run_path = runs_path / "{dn}_{rn}".format(dn=dataset_name,
                                                       rn=run_name)
        csv_path = this_run_path / "world_trajectory_gt.csv"

        try:
            gt = csv_ground_truth_file.decode('utf-8')
        except Exception:
            return ("Could not parse .csv file as UTF-8", 400)
        else:
            with csv_path.open('w') as f:
                f.write(gt)

            cmd = [
                python_path, "tracking_world_optimization.py",
                "--dataset={}".format(dataset_name),
                "--run={}".format(run_name), "--date={}".format(date),
                "--gt_csv={}".format(csv_path),
                "--det_id={}".format(detection_id),
                "--gt_class_name_conversion={}".format(class_name_conversion),
                "--visualize={}".format(visualize),
                "--patience={}".format(patience),
                "--per_iteration={}".format(per_iteration)
            ]

            job_id = jm.run(cmd, "optimize_tracking_world_coordinates")
            if job_id:
                return (job_id, 202)
            else:
                return (NoContent, 404)
    else:
        s = dataset_name + '_' + run_name
        return (s, 404)
Example 8
def post_visualize_detections_job(dataset_name, run_name, confidence_threshold, coords):
    dataset_name = quote(dataset_name)
    run_name = quote(run_name)
    rc = RunConfig(dataset_name, run_name)
    dc = DatasetConfig(dataset_name)
    if rc.exists and dc.exists:
        cmd = [python_path, "visualize_detections.py",
               "--cmd=findvids",
               "--dataset={}".format(dataset_name),
               "--run={}".format(run_name),
               "--res={}".format(rc.get("detector_resolution")),
               "--conf={}".format(confidence_threshold),
               "--fps={}".format(dc.get('video_fps')),
               "--coords={}".format(coords)]

        job_id = jm.run(cmd, "visualize_detections")
        if job_id:
            return (job_id, 202)
        else:
            return (NoContent, 503)
    else:
        return (NoContent, 404)
Example 9
def post_detections_to_world_coordinates_job(dataset_name, run_name,
                                             make_videos):
    dataset_name = quote(dataset_name)
    run_name = quote(run_name)
    rc = RunConfig(dataset_name, run_name)
    dc = DatasetConfig(dataset_name)
    if rc.exists and dc.exists:
        cmd = [
            python_path, "detections_world.py", "--cmd=findvids",
            "--dataset={}".format(dataset_name), "--run={}".format(run_name),
            "--make_videos={}".format(make_videos),
            "--ssdres={}".format(rc.get("detector_resolution")),
            "--vidres={}".format(dc.get('video_resolution')),
            "--kltres={}".format(dc.get('point_track_resolution'))
        ]

        job_id = jm.run(cmd, "detections_to_world")
        if job_id:
            return (job_id, 202)
        else:
            return (NoContent, 503)
    else:
        return (NoContent, 404)
Example 10
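    # NOTE: fragment -- the else branch below presumably completes an earlier check for an
    # explicitly supplied model name; when none is given, it falls back to the most
    # recently modified .model file under output/models/.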
    else:
        models = glob('output/models/*.model')
        models.sort(key=lambda x: os.path.getmtime(x), reverse=True)
        if os.name == 'nt':
            model_name = models[0].split('\\')[1].split('.')[0]
        else:
            model_name = models[0].replace('output/models/', '').split('.')[0]

    data_file = args.prediction
    prediction.predict(model_name, data_file, use_gpu=use_gpu, show_ui=args.show_ui)
    exit()

# Retrieve config file and create interval config dictionary.
config_file = "configurations/" + args.config + ".config"
write_out('Using config: %s' % config_file)
configs = RunConfig(config_file)

if configs.run_params["hide_ui"]:
    write_out("Live plot deactivated, see output folder for plot.")

max_seq_length = configs.run_params["max_sequence_length"]
use_evolutionary = configs.run_params["use_evolutionary"]
n_proteins = configs.run_params["n_proteins"]


# start web server
if not configs.run_params["hide_ui"]:
    start_dashboard_server()


process_raw_data(use_gpu, n_proteins=n_proteins, max_sequence_length=max_seq_length, force_pre_processing_overwrite=False)

datafolder = "data/preprocessed/"
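The os.name == 'nt' branch above exists only to strip the directory and extension from the newest model path in a platform-dependent way. A minimal, platform-independent sketch of the same lookup (assuming the same output/models/*.model layout):

from pathlib import Path

def newest_model_name(models_dir="output/models"):
    # Most recently modified .model file, reduced to its bare name
    # (directory and everything from the first '.' onward stripped, as above).
    models = sorted(Path(models_dir).glob("*.model"),
                    key=lambda p: p.stat().st_mtime, reverse=True)
    return models[0].name.split(".")[0] if models else None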
Example 11
def loop(args):
    # create config and model collection objects, and retrieve the run config
    configs = {}
    models = {}
    configs.update({'run': RunConfig(args.config_file)})

    # set GPU-related environmental options and config settings
    os.environ['CUDA_VISIBLE_DEVICES'] = str(
        args.gpu) if args.gpu is not None else ''
    setproctitle('RGN ' + configs['run'].names['run'] + ' on ' +
                 os.getenv('CUDA_VISIBLE_DEVICES', 'CPU'))

    # derived files and directories
    base_dir = args.base_directory
    print('base_dir=', base_dir)
    run_dir = os.path.join(base_dir, RUNS_DIRNAME, configs['run'].names['run'],
                           configs['run'].names['dataset'])
    print('run_dir=', run_dir)
    data_dir = os.path.join(base_dir, DATAS_DIRNAME,
                            configs['run'].names['dataset'])
    print('data_dir=', data_dir)
    checkpoints_dir = os.path.join(run_dir, CHECKPOINTS_DIRNAME, '')
    print('checkpoints_dir=', checkpoints_dir)

    logs_dir = os.path.join(run_dir, LOGS_DIRNAME, '')
    print('logs_dir=', logs_dir)
    stdout_err_file = os.path.join(base_dir, LOGS_DIRNAME,
                                   configs['run'].names['run'] + '.log')
    alphabet_file = os.path.join(
        data_dir, ALPHABETS_DIRNAME, configs['run'].names['alphabet'] +
        '.csv') if configs['run'].names['alphabet'] is not None else None

    # this is all for evaluation models (including training, so training_batch_size is for evaluation)
    full_training_glob = os.path.join(data_dir, FULL_TRAINING_DIRNAME,
                                      configs['run'].io['full_training_glob'])
    sample_training_glob = os.path.join(
        data_dir, FULL_TRAINING_DIRNAME,
        configs['run'].io['sample_training_glob'])
    training_batch_size = configs['run'].evaluation['num_training_samples']
    training_invocations = configs['run'].evaluation[
        'num_training_invocations']

    validation_glob = os.path.join(data_dir, SAMPLE_VALIDATION_DIRNAME,
                                   configs['run'].io['sample_validation_glob'])
    validation_batch_size = configs['run'].evaluation['num_validation_samples']
    validation_invocations = configs['run'].evaluation[
        'num_validation_invocations']

    testing_glob = os.path.join(data_dir, FULL_TESTING_DIRNAME,
                                configs['run'].io['full_testing_glob'])
    testing_batch_size = configs['run'].evaluation['num_testing_samples']
    testing_invocations = configs['run'].evaluation['num_testing_invocations']

    if not args.prediction_only:
        eval_num_epochs = None
    else:
        eval_num_epochs = 1
        training_batch_size = validation_batch_size = testing_batch_size = 1
        training_invocations = validation_invocations = testing_invocations = 1

    # redirect stdout/err to file
    sys.stderr.flush()
    if not os.path.exists(os.path.dirname(stdout_err_file)):
        os.makedirs(os.path.dirname(stdout_err_file))
    stdout_err_file_handle = open(stdout_err_file, 'w')
    os.dup2(stdout_err_file_handle.fileno(), sys.stderr.fileno())
    sys.stdout = stdout_err_file_handle

    # select device placement taking into consideration the interaction between training and evaluation models
    if configs['run'].computing['training_device'] == 'GPU' and configs[
            'run'].computing['evaluation_device'] == 'GPU':
        fod_training = {'/cpu:0': ['point_to_coordinate']}
        fod_evaluation = {'/cpu:0': ['point_to_coordinate']}
        dd_training = ''
        dd_evaluation = ''
    elif configs['run'].computing['training_device'] == 'GPU' and configs[
            'run'].computing['evaluation_device'] == 'CPU':
        fod_training = {'/cpu:0': ['point_to_coordinate', 'loss_history']}
        fod_evaluation = {}
        dd_training = ''
        dd_evaluation = '/cpu:0'
    else:
        fod_training = {}
        fod_evaluation = {}
        dd_training = '/cpu:0'
        dd_evaluation = '/cpu:0'

    # create models configuration templates
    configs.update({
        'training': RGNConfig(
            args.config_file, {
                'name': 'training',
                'dataFilesGlob': full_training_glob,
                'checkpointsDirectory': checkpoints_dir,
                'logsDirectory': logs_dir,
                'fileQueueCapacity': configs['run'].queueing['training_file_queue_capacity'],
                'batchQueueCapacity': configs['run'].queueing['training_batch_queue_capacity'],
                'minAfterDequeue': configs['run'].queueing['training_min_after_dequeue'],
                'shuffle': configs['run'].queueing['training_shuffle'],
                'tertiaryNormalization': configs['run'].loss['training_tertiary_normalization'],
                'batchDependentNormalization': configs['run'].loss['training_batch_dependent_normalization'],
                'alphabetFile': alphabet_file,
                'functionsOnDevices': fod_training,
                'defaultDevice': dd_training,
                'fillGPU': args.fill_gpu
            })
    })

    configs.update({
        'evaluation': RGNConfig(
            args.config_file, {
                'fileQueueCapacity': configs['run'].queueing['evaluation_file_queue_capacity'],
                'batchQueueCapacity': configs['run'].queueing['evaluation_batch_queue_capacity'],
                'minAfterDequeue': configs['run'].queueing['evaluation_min_after_dequeue'],
                'shuffle': configs['run'].queueing['evaluation_shuffle'],
                'tertiaryNormalization': configs['run'].loss['evaluation_tertiary_normalization'],
                'batchDependentNormalization': configs['run'].loss['evaluation_batch_dependent_normalization'],
                'alphabetFile': alphabet_file,
                'functionsOnDevices': fod_evaluation,
                'defaultDevice': dd_evaluation,
                'numEpochs': eval_num_epochs,
                'bucketBoundaries': None
            })
    })

    # Override included evaluation models with list from command-line if specified (assumes none are included and then includes ones that are specified)
    if args.evaluation_model:
        for prefix in ['', 'un']:
            for group in ['training', 'validation', 'testing']:
                configs['run'].evaluation.update(
                    {'include_' + prefix + 'weighted_' + group: False})
        for entry in args.evaluation_model:
            configs['run'].evaluation.update({'include_' + entry: True})

    # Override other command-line arguments
    if args.gpu_fraction:
        configs['training'].computing.update(
            {'gpu_fraction': args.gpu_fraction})
    if args.milestone:
        configs['run'].optimization.update(
            {'validation_milestone': dict(args.milestone)})

    # Ensure that correct validation reference is chosen if not predicting, and turn off evaluation loss if predicting
    if not args.prediction_only:
        if ((not configs['run'].evaluation['include_weighted_validation'])   and configs['run'].optimization['validation_reference'] == 'weighted') or \
           ((not configs['run'].evaluation['include_unweighted_validation']) and configs['run'].optimization['validation_reference'] == 'unweighted'):
            raise RuntimeError(
                'Chosen validation reference is not included in run.')
    else:
        configs['evaluation'].loss['include'] = False

    # rescaling needed to adjust for how frequently loss_history is updated
    if configs['training'].curriculum['behavior'] == 'loss_change':
        configs['training'].curriculum['change_num_iterations'] //= configs[
            'run'].io['evaluation_frequency']  # result must be >=1
        configs['evaluation'].curriculum['change_num_iterations'] //= configs[
            'run'].io['evaluation_frequency']  # ditto

    # create training model
    models = {}
    models.update({'training': RGNModel('training', configs['training'])})
    print('*** training configuration ***')
    pprint(configs['training'].__dict__)

    # create weighted training evaluation model (conditional)
    if configs['run'].evaluation['include_weighted_training']:
        configs.update({'eval_wt_train': deepcopy(configs['evaluation'])})
        configs['eval_wt_train'].io['name'] = 'evaluation_wt_training'
        configs['eval_wt_train'].io['data_files_glob'] = sample_training_glob
        configs['eval_wt_train'].optimization[
            'batch_size'] = training_batch_size
        configs['eval_wt_train'].queueing[
            'num_evaluation_invocations'] = training_invocations
        models.update({
            'eval_wt_train':
            RGNModel('evaluation', configs['eval_wt_train'])
        })
        print('\n\n\n*** weighted training evaluation configuration ***')
        pprint(configs['eval_wt_train'].__dict__)

    # create weighted validation evaluation model (conditional)
    if configs['run'].evaluation['include_weighted_validation']:
        configs.update({'eval_wt_val': deepcopy(configs['evaluation'])})
        configs['eval_wt_val'].io['name'] = 'evaluation_wt_validation'
        configs['eval_wt_val'].io['data_files_glob'] = validation_glob
        configs['eval_wt_val'].optimization[
            'batch_size'] = validation_batch_size
        configs['eval_wt_val'].queueing[
            'num_evaluation_invocations'] = validation_invocations
        if configs['run'].optimization['validation_reference'] == 'weighted':
            configs['eval_wt_val'].curriculum['update_loss_history'] = True
        models.update(
            {'eval_wt_val': RGNModel('evaluation', configs['eval_wt_val'])})
        print('\n\n\n*** weighted validation evaluation configuration ***')
        pprint(configs['eval_wt_val'].__dict__)

    # create weighted testing evaluation model (conditional)
    if configs['run'].evaluation['include_weighted_testing']:
        configs.update({'eval_wt_test': deepcopy(configs['evaluation'])})
        configs['eval_wt_test'].io['name'] = 'evaluation_wt_testing'
        configs['eval_wt_test'].io['data_files_glob'] = testing_glob
        configs['eval_wt_test'].optimization['batch_size'] = testing_batch_size
        configs['eval_wt_test'].queueing[
            'num_evaluation_invocations'] = testing_invocations
        models.update(
            {'eval_wt_test': RGNModel('evaluation', configs['eval_wt_test'])})
        print('\n\n\n*** weighted testing evaluation configuration ***')
        pprint(configs['eval_wt_test'].__dict__)

    # create equivalents for unweighted loss if there's a curriculum.
    if configs['training'].curriculum['mode'] is not None:
        # create unweighted training evaluation model (conditional)
        if configs['run'].evaluation['include_unweighted_training']:
            configs.update(
                {'eval_unwt_train': deepcopy(configs['evaluation'])})
            configs['eval_unwt_train'].io['name'] = 'evaluation_unwt_training'
            configs['eval_unwt_train'].io[
                'data_files_glob'] = sample_training_glob
            configs['eval_unwt_train'].optimization[
                'batch_size'] = training_batch_size
            configs['eval_unwt_train'].queueing[
                'num_evaluation_invocations'] = training_invocations
            configs['eval_unwt_train'].curriculum['mode'] = None
            configs['eval_unwt_train'].curriculum['behavior'] = None
            models.update({
                'eval_unwt_train':
                RGNModel('evaluation', configs['eval_unwt_train'])
            })

        # create unweighted validation evaluation model (conditional)
        if configs['run'].evaluation['include_unweighted_validation']:
            configs.update({'eval_unwt_val': deepcopy(configs['evaluation'])})
            configs['eval_unwt_val'].io['name'] = 'evaluation_unwt_validation'
            configs['eval_unwt_val'].io['data_files_glob'] = validation_glob
            configs['eval_unwt_val'].optimization[
                'batch_size'] = validation_batch_size
            configs['eval_unwt_val'].queueing[
                'num_evaluation_invocations'] = validation_invocations
            configs['eval_unwt_val'].curriculum['mode'] = None
            configs['eval_unwt_val'].curriculum['behavior'] = None
            if configs['run'].optimization[
                    'validation_reference'] == 'unweighted':
                configs['eval_unwt_val'].curriculum[
                    'update_loss_history'] = True
            models.update({
                'eval_unwt_val':
                RGNModel('evaluation', configs['eval_unwt_val'])
            })

        # create unweighted testing evaluation model (conditional)
        if configs['run'].evaluation['include_unweighted_testing']:
            configs.update({'eval_unwt_test': deepcopy(configs['evaluation'])})
            configs['eval_unwt_test'].io['name'] = 'evaluation_unwt_testing'
            configs['eval_unwt_test'].io['data_files_glob'] = testing_glob
            configs['eval_unwt_test'].optimization[
                'batch_size'] = testing_batch_size
            configs['eval_unwt_test'].queueing[
                'num_evaluation_invocations'] = testing_invocations
            configs['eval_unwt_test'].curriculum['mode'] = None
            configs['eval_unwt_test'].curriculum['behavior'] = None
            models.update({
                'eval_unwt_test':
                RGNModel('evaluation', configs['eval_unwt_test'])
            })

    # start head model and related prep
    stdout_err_file_handle.flush()
    session = models['training'].start(models.values())
    global_step = models['training'].current_step(session)
    current_log_step = (global_step //
                        configs['run'].io['prediction_frequency']) + 1
    log_dir = os.path.join(run_dir, str(current_log_step))
    restart = False

    # predict or train depending on set mode behavior
    if args.prediction_only:
        try:
            while not models['training'].is_done():
                predict_and_log(log_dir, configs, models, session)
        except tf.errors.OutOfRangeError:
            pass
        except:
            print('Unexpected error: ', sys.exc_info()[0])
            raise
        finally:
            if models['training']._is_started:
                models['training'].finish(session, save=False)
            stdout_err_file_handle.close()
    else:
        # clean up any residue left over after the last checkpoint
        if global_step != 0:
            # remove future directories
            last_log_step = sorted([
                int(os.path.basename(os.path.normpath(dir)))
                for dir in glob(os.path.join(run_dir, '*[0-9]'))
            ])[-1]
            for step in range(current_log_step + 1, last_log_step + 1):
                rmtree(os.path.join(run_dir, str(step)))

            # remove future log entries in current log files
            log_file = os.path.join(log_dir, 'error.log')
            if os.path.exists(log_file):
                with open(log_file, 'r+') as f:
                    while True:
                        new_line = f.readline().split()
                        if len(new_line) > 1:
                            step = int(new_line[1])
                            if step == global_step:
                                f.truncate()
                                break
                        else:  # reached end without seeing global_step, means checkpoint is ahead of last recorded log entry
                            break

        # training loop
        try:
            while not models['training'].is_done():
                # Train for one step
                global_step, ids = models['training'].train(session)

                # Set and create logging directory and files if needed
                log_dir = os.path.join(
                    run_dir,
                    str((global_step //
                         configs['run'].io['prediction_frequency']) + 1))
                log_file = os.path.join(log_dir, 'error.log')
                if not os.path.exists(log_dir): os.makedirs(log_dir)

                # Evaluate error, get diagnostics, and raise exceptions if necessary
                if global_step % configs['run'].io['evaluation_frequency'] == 0:
                    diagnostics = evaluate_and_log(log_file, configs, models,
                                                   session)

                    # restart if a milestone is missed
                    val_ref_set_prefix = 'un' if configs['run'].optimization[
                        'validation_reference'] == 'unweighted' else ''
                    min_loss_achieved = diagnostics[
                        val_ref_set_prefix +
                        'wt_val_loss']['min_tertiary_loss_achieved_all']
                    for step, loss in configs['run'].optimization[
                            'validation_milestone'].items():
                        if global_step >= step and min_loss_achieved > loss:
                            raise MilestoneError('Milestone at step ' + str(global_step) + \
                                                 ' missed because minimum loss achieved so far is ' + str(min_loss_achieved))

                    # restart if gradients are zero
                    if (diagnostics['min_grad'] == 0 and diagnostics['max_grad'] == 0) or \
                       (configs['run'].evaluation['include_diagnostics'] and (np.isnan(diagnostics['min_grad']) or np.isnan(diagnostics['max_grad']))):
                        raise DeadGradientError('Gradient is dead.')

                # Predict structures. Currently assumes that weighted training and validation models are available, and fails if they're not.
                if global_step % configs['run'].io['prediction_frequency'] == 0:
                    predict_and_log(log_dir, configs, models, session)

                # Checkpoint
                if global_step % configs['run'].io['checkpoint_frequency'] == 0:
                    models['training'].save(session)

        except tf.errors.OutOfRangeError:
            print('Epoch limit reached.')

        except (tf.errors.InvalidArgumentError, DeadGradientError
                ):  # InvalidArgumentError is usually triggered by a nan
            models['training'].finish(session, save=False)

            if args.restart_on_dead_gradient:
                print(
                    'Nan or dead gradient encountered; model will be resumed from last checkpoint if one exists, or restarted from scratch otherwise.'
                )
                if not os.path.isdir(checkpoints_dir):
                    for sub_dir in next(os.walk(run_dir))[1]:
                        rmtree(os.path.join(
                            run_dir, sub_dir))  # erase all old directories
                restart = True
            else:
                print(
                    'Nan or dead gradient encountered; model will be terminated.'
                )

        except MilestoneError:
            models['training'].finish(session, save=False)

            if args.restart_on_missed_milestone:
                print(
                    'Milestone missed; model will be restarted from scratch with an incremented seed.'
                )

                for sub_dir in next(os.walk(run_dir))[1]:
                    rmtree(os.path.join(run_dir,
                                        sub_dir))  # erase all old directories

                # modify configuration file with new seed
                old_seed = configs['training'].initialization['graph_seed']
                new_seed = old_seed + args.seed_increment
                for line in fileinput.input(args.config_file, inplace=True):
                    print(line.replace('randSeed ' + str(old_seed),
                                       'randSeed ' + str(new_seed)), end='')

                restart = True
            else:
                print('Milestone missed; model will be terminated.')

        except:
            print('Unexpected error: ', sys.exc_info()[0])
            raise

        finally:
            # Wrap up (ask threads to stop, save final checkpoint, etc.)
            if models['training']._is_started:
                models['training'].finish(session,
                                          save=args.checkpoint_on_finish)
            stdout_err_file_handle.close()

    return restart
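loop() returns restart, which is True when a dead gradient or a missed milestone should trigger another attempt. A minimal sketch of a driver built on that contract (parse_arguments is hypothetical; only the return-value semantics come from the function above):

if __name__ == '__main__':
    args = parse_arguments()        # hypothetical argument parsing, not shown here
    while loop(args):
        print('Restarting: resuming from the last checkpoint or starting from scratch...')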
Example 12
def main(dataset, run, n_clips, clip_length):
    dc = DatasetConfig(dataset)
    rc = RunConfig(dataset, run)
    mask = Masker(dataset)
    classes = get_classnames(dataset)
    num_classes = len(classes) + 1
    calib = Calibration(dataset)

    dataset_path = "{dsp}{ds}/".format(dsp=datasets_path, ds=dataset)
    run_path = "{rp}{ds}_{r}/".format(rp=runs_path, ds=dataset, r=run)

    # Grab a bunch of videos
    vids_query = "{dsp}videos/*.mkv".format(dsp=dataset_path)
    all_vids = glob(vids_query)
    all_vids = [right_remove(x.split('/')[-1], '.mkv') for x in all_vids]

    all_vids.sort()

    vids = []

    if n_clips > len(all_vids):
        n_clips = len(all_vids)

    if n_clips == len(all_vids):
        vids = all_vids
    else:
        while len(vids) < n_clips:
            vid = choice(all_vids)
            if vid not in vids:
                vids.append(vid)

    print_flush(vids)

    # Find out what has been run on all of these videos, what to include
    include_klt = True
    include_pixeldets = True
    include_worlddets = True
    include_worldtracks = True

    klts = []
    pixeldets = []
    worlddets = []
    worldtracks = []

    # Point tracks need to be converted for faster access
    vidres = dc.get('video_resolution')
    kltres = dc.get('point_track_resolution')

    class KLTConfig(object):
        klt_x_factor = 0
        klt_y_factor = 0

    klt_config = KLTConfig()
    klt_config.klt_x_factor = vidres[0] / kltres[0]
    klt_config.klt_y_factor = vidres[1] / kltres[1]

    ssdres = rc.get('detector_resolution')
    x_scale = vidres[0] / ssdres[0]
    y_scale = vidres[1] / ssdres[1]

    colors = class_colors(num_classes)

    for vid in vids:
        f = get_klt_path(dataset_path, vid)
        if not isfile(f):
            include_klt = False
        else:
            klt = load(f)
            klt, klt_frames = convert_klt(klt, klt_config)
            pts = (klt, klt_frames, class_colors(n_cols_klts))
            klts.append(pts)

        f = get_pixeldet_path(run_path, vid)
        if not isfile(f):
            include_pixeldets = False
        else:
            dets = pd.read_csv(f)

            pixeldets.append((dets, colors, x_scale, y_scale))

        f = get_worlddet_path(run_path, vid)
        if not isfile(f):
            include_worlddets = False
        else:
            dets = pd.read_csv(f)

            worlddets.append((dets, colors, calib))

        f = get_worldtracks_path(run_path, vid)
        if not isfile(f):
            include_worldtracks = False
        else:
            tracks = load(f)
            worldtracks.append((tracks, class_colors(n_cols_tracks), calib))

    print_flush("Point tracks: {}".format(include_klt))
    print_flush("Pixel coordinate detections: {}".format(include_pixeldets))
    print_flush("World coordinate detections: {}".format(include_worlddets))
    print_flush("World coordinate tracks: {}".format(include_worldtracks))

    # Decide where to start and stop in the videos
    clip_length = clip_length * dc.get(
        'video_fps')  # convert from seconds to frames

    print_flush("Clip length in frames: {}".format(clip_length))

    clips = []
    for vid in vids:
        start, stop = make_clip(vid, clip_length, dataset_path)
        clips.append((start, stop))

    incs = [
        include_klt, include_pixeldets, include_worlddets, include_worldtracks
    ]
    funs = [klt_frame, pixeldet_frame, worlddet_frame, worldtracks_frame]
    dats = [klts, pixeldets, worlddets, worldtracks]
    nams = [
        "Point tracks", "Detections in pixel coordinates",
        "Detections in world coordinates", "Tracks in world coordinates"
    ]

    print_flush(clips)

    with iio.get_writer("{trp}summary.mp4".format(trp=run_path),
                        fps=dc.get('video_fps')) as outvid:
        for i_vid, vid in enumerate(vids):
            print_flush(vid)
            old_prog = 0

            with iio.get_reader("{dsp}videos/{v}.mkv".format(dsp=dataset_path,
                                                             v=vid)) as invid:
                start, stop = clips[i_vid]
                for i_frame in range(start, stop):
                    frame = invid.get_data(i_frame)

                    pieces = []

                    for inc, fun, dat, nam in zip(incs, funs, dats, nams):
                        if inc:
                            piece = fun(dat[i_vid],
                                        mask.mask(frame.copy(), alpha=0.5),
                                        i_frame)
                            draw_text(piece, vid, i_frame, nam)
                            pieces.append(piece)

                    outvid.append_data(join(pieces))

                    prog = float(i_frame - start) / (stop - start)
                    if prog - old_prog > 0.1:
                        print_flush("{}%".format(round(prog * 100)))
                        old_prog = prog

    print_flush("Done!")
Example 13
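        # NOTE: fragment -- presumably the tail of the load() helper called below; it walks a
        # dataframe df row by row and fills the module-level texts / labels lists that
        # featurize_texts consumes.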
        a = []
        a.append(df.iloc[i]['FirstWord'])
        a.append(df.iloc[i]['SecondWord'])
        texts.append(a)
    for i in range(len(df)):
        labels.append(df.iloc[i]['Relation'])


# Setup logging
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s -   %(message)s",
    datefmt="%m/%d/%Y %H:%M:%S",
    level=logging.INFO,
)

c = RunConfig()

# Loading tokenizer, configuration, and model
tokenizer = BertTokenizer.from_pretrained(c.pretrained_transformer)

filepath = "./datasets/CNItalianDataFiltered.csv"

load(filepath)

finaldict = featurize_texts(texts,
                            tokenizer,
                            labels,
                            max_length=128,
                            add_special_tokens=True,
                            is_text_pair=True,
                            has_toktype_ids=True)
Example 14
def predict(args):
    # create config and model collection objects, and retrieve the run config
    configs = {}
    models = {}
    configs.update({"run": RunConfig(args.config_file)})

    # set GPU-related environmental options and config settings
    os.environ["CUDA_VISIBLE_DEVICES"] = str(
        args.gpu) if args.gpu is not None else ""
    setproctitle("RGN " + configs["run"].names["run"] + " on " +
                 os.getenv("CUDA_VISIBLE_DEVICES", "CPU"))

    # derived files and directories
    checkpoints_dir = args.checkpoint

    logs_dir = os.path.join(LOGS_DIRNAME, "")

    # this is all for evaluation models (including training, so training_batch_size is for evaluation)
    full_training_glob = args.input_file
    training_invocations = configs["run"].evaluation[
        "num_training_invocations"]

    testing_glob = args.input_file
    testing_batch_size = configs["run"].evaluation["num_testing_samples"]
    testing_invocations = configs["run"].evaluation["num_testing_invocations"]

    eval_num_epochs = 1
    training_batch_size = validation_batch_size = testing_batch_size = 1
    training_invocations = validation_invocations = testing_invocations = 1

    # select device placement taking into consideration the interaction between training and evaluation models
    if (configs["run"].computing["training_device"] == "GPU"
            and configs["run"].computing["evaluation_device"] == "GPU"):
        fod_training = {"/cpu:0": ["point_to_coordinate"]}
        fod_evaluation = {"/cpu:0": ["point_to_coordinate"]}
        dd_training = ""
        dd_evaluation = ""
    elif (configs["run"].computing["training_device"] == "GPU"
          and configs["run"].computing["evaluation_device"] == "CPU"):
        fod_training = {"/cpu:0": ["point_to_coordinate", "loss_history"]}
        fod_evaluation = {}
        dd_training = ""
        dd_evaluation = "/cpu:0"
    else:
        fod_training = {}
        fod_evaluation = {}
        dd_training = "/cpu:0"
        dd_evaluation = "/cpu:0"

    # create models configuration templates
    configs.update({
        "training": RGNConfig(
            args.config_file,
            {
                "name": "training",
                "dataFilesGlob": full_training_glob,
                "checkpointsDirectory": checkpoints_dir,
                "logsDirectory": logs_dir,
                "fileQueueCapacity": configs["run"].queueing["training_file_queue_capacity"],
                "batchQueueCapacity": configs["run"].queueing["training_batch_queue_capacity"],
                "minAfterDequeue": configs["run"].queueing["training_min_after_dequeue"],
                "shuffle": configs["run"].queueing["training_shuffle"],
                "tertiaryNormalization": configs["run"].loss["training_tertiary_normalization"],
                "batchDependentNormalization": configs["run"].loss["training_batch_dependent_normalization"],
                # "alphabetFile": None,
                "functionsOnDevices": fod_training,
                "defaultDevice": dd_training,
                "fillGPU": args.fill_gpu,
            },
        )
    })

    configs.update({
        "evaluation": RGNConfig(
            args.config_file,
            {
                "fileQueueCapacity": configs["run"].queueing["evaluation_file_queue_capacity"],
                "batchQueueCapacity": configs["run"].queueing["evaluation_batch_queue_capacity"],
                "minAfterDequeue": configs["run"].queueing["evaluation_min_after_dequeue"],
                "shuffle": configs["run"].queueing["evaluation_shuffle"],
                "tertiaryNormalization": configs["run"].loss["evaluation_tertiary_normalization"],
                "batchDependentNormalization": configs["run"].loss["evaluation_batch_dependent_normalization"],
                # "alphabetFile": alphabet_file,
                "functionsOnDevices": fod_evaluation,
                "defaultDevice": dd_evaluation,
                "numEpochs": eval_num_epochs,
                "bucketBoundaries": None,
            },
        )
    })

    # Override included evaluation models with list from command-line if specified (assumes none are included and then includes ones that are specified)
    for prefix in ["", "un"]:
        for group in ["training", "validation", "testing"]:
            configs["run"].evaluation.update(
                {"include_" + prefix + "weighted_" + group: False})
    for entry in args.evaluation_model:
        configs["run"].evaluation.update({"include_" + entry: True})

    # Override other command-line arguments
    if args.gpu_fraction:
        configs["training"].computing.update(
            {"gpu_fraction": args.gpu_fraction})

    configs["evaluation"].loss["include"] = False

    # create training model
    models = {}
    models.update({"training": RGNModel("training", configs["training"])})
    # print('*** training configuration ***')
    # pprint(configs['training'].__dict__)

    # create weighted testing evaluation model (conditional)
    if configs["run"].evaluation["include_weighted_testing"]:
        configs.update({"eval_wt_test": deepcopy(configs["evaluation"])})
        configs["eval_wt_test"].io["name"] = "evaluation_wt_testing"
        configs["eval_wt_test"].io["data_files_glob"] = testing_glob
        configs["eval_wt_test"].optimization["batch_size"] = testing_batch_size
        configs["eval_wt_test"].queueing[
            "num_evaluation_invocations"] = testing_invocations
        models.update(
            {"eval_wt_test": RGNModel("evaluation", configs["eval_wt_test"])})
        # print('\n\n\n*** weighted testing evaluation configuration ***')
        # pprint(configs['eval_wt_test'].__dict__)

    # start head model and related prep
    session = models["training"].start(models.values())
    global_step = models["training"].current_step(session)
    current_log_step = (global_step //
                        configs["run"].io["prediction_frequency"]) + 1

    # predict or train depending on set mode behavior
    result = {}
    try:
        while not models["training"].is_done():
            pred = predict_tertiary(configs, models, session)
            if pred is not None:
                idx, tertiary = pred
                result[idx] = tertiary
    except tf.errors.OutOfRangeError:
        pass
    except:
        print("Unexpected error: ", sys.exc_info()[0])
        raise
    finally:
        if models["training"]._is_started:
            models["training"].finish(session, save=False)

    return result