Example #1
    def __init__(self, config):

        # Pick the GPU before TensorFlow initialises: setting
        # CUDA_VISIBLE_DEVICES after the session exists has no effect.
        # An empty string forces CPU-only execution.
        if config['gpu'] >= 0:
            os.environ["CUDA_VISIBLE_DEVICES"] = str(config['gpu'])
        else:
            os.environ["CUDA_VISIBLE_DEVICES"] = ""

        # Cap this process at a quarter of the GPU memory so several
        # workers can share one device (TensorFlow 1.x API).
        tf_config = tf.ConfigProto()
        tf_config.gpu_options.per_process_gpu_memory_fraction = 0.25
        set_session(tf.Session(config=tf_config))

        cuda.select_device(0)

        self._verbose = config['verbose']
        self._threshold = config['threshold']

        # list(ascii_lowercase)[:11] is the eleven letters 'a' through 'k'.
        if config['model'] not in list(ascii_lowercase)[:11]:
            raise ValueError('Model must be one of a -- k.')
        self._model = get_model(config['model'])
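A minimal sketch of how this constructor might be driven. Only the config keys ('verbose', 'threshold', 'model', 'gpu') come from the snippet above; the class name Classifier is a placeholder assumption.

# Hypothetical driver -- the class name is assumed; the keys come from the
# snippet above.
config = {
    'verbose': True,    # print progress messages
    'threshold': 0.5,   # probability cut-off used elsewhere in the class
    'model': 'a',       # one of the letters 'a' -- 'k'
    'gpu': 0,           # GPU index, or -1 to run on the CPU
}
classifier = Classifier(config)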
Example #2
    cands_to_eval = glob.glob(f'{args.data_dir}/*h5')

    if len(cands_to_eval) == 0:
        raise FileNotFoundError(f"No candidate h5 files found in {args.data_dir}.")

    logging.debug(f'Found {len(cands_to_eval)} candidates to evaluate')

    # Get the data generator, make sure noise and shuffle are off.
    cand_datagen = DataGenerator(list_IDs=cands_to_eval,
                                 labels=[0] * len(cands_to_eval),
                                 shuffle=False,
                                 noise=False,
                                 batch_size=args.batch_size)

    model = get_model(args.model)

    # Let's get predicting. use_multiprocessing is defined earlier in the
    # script (not shown in this snippet).
    probs = model.predict_generator(generator=cand_datagen,
                                    verbose=1,
                                    use_multiprocessing=use_multiprocessing,
                                    workers=args.nproc,
                                    steps=len(cand_datagen))

    # Save results: the positive-class probability for each candidate and a
    # 0/1 label from thresholding at args.probability.
    results_dict = {}
    results_dict['candidate'] = cands_to_eval
    results_dict['probability'] = probs[:, 1]
    results_dict['label'] = np.round(probs[:, 1] >= args.probability)
    results_file = args.data_dir + f'/results_{args.model}.csv'
    pd.DataFrame(results_dict).to_csv(results_file, index=False)
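To see which candidates passed, the CSV written above can be read straight back; the directory and model letter in the path below are placeholders.

import pandas as pd

# Load the results file written by the snippet above (path assumed).
results = pd.read_csv('data/results_a.csv')

# Keep the candidates the model labelled positive.
positives = results[results['label'] == 1]
print(f'{len(positives)} of {len(results)} candidates passed the threshold')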
Example #3

    data_df = pd.read_csv(args.data_csv)

    # args.val_split is the fraction of rows kept for training; the remaining
    # (1 - val_split) rows become the validation set. The fixed seed makes the
    # split reproducible.
    train_df, val_df = train_test_split(data_df,
                                        test_size=(1 - args.val_split),
                                        random_state=1993)
    # Noise augmentation and shuffling are on for training, off for validation.
    train_data_generator = DataGenerator(list_IDs=list(train_df['h5']),
                                         labels=list(train_df['label']),
                                         noise=True,
                                         shuffle=True)
    validate_data_generator = DataGenerator(list_IDs=list(val_df['h5']),
                                            labels=list(val_df['label']),
                                            noise=False,
                                            shuffle=False)

    model_to_train = get_model(args.model)

    # Prepare the model for training; ndt, nft and nf set how many layers of
    # the DM-time, frequency-time and fusion parts of the network are trained.
    model_to_train = ready_for_train(model_to_train,
                                     ndt=args.n_dt_layers,
                                     nft=args.n_ft_layers,
                                     nf=args.n_fusion_layers)

    # Train with early stopping: training halts after `patience` epochs
    # without improvement, and output is written to output_path.
    trained_model, history = train(model_to_train,
                                   epochs=args.epochs,
                                   patience=args.patience,
                                   output_path=args.output_path,
                                   nproc=args.nproc,
                                   train_obj=train_data_generator,
                                   val_obj=validate_data_generator)
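For context, here is a sketch of the command-line interface this snippet implies. Every flag name and default below is an assumption inferred from the args.* accesses above, not the repository's actual CLI.

import argparse

# Hypothetical argument parser inferred from the args.* accesses in the
# snippet; flag names and defaults are assumptions.
parser = argparse.ArgumentParser(description='Fine-tune a candidate classifier.')
parser.add_argument('--data_csv', required=True,
                    help='CSV with columns "h5" (file path) and "label"')
parser.add_argument('--val_split', type=float, default=0.8,
                    help='fraction of rows used for training; the rest validate')
parser.add_argument('--model', default='a', help='model letter to fine-tune')
parser.add_argument('--n_dt_layers', type=int, default=1)
parser.add_argument('--n_ft_layers', type=int, default=1)
parser.add_argument('--n_fusion_layers', type=int, default=1)
parser.add_argument('--epochs', type=int, default=15)
parser.add_argument('--patience', type=int, default=5)
parser.add_argument('--nproc', type=int, default=4)
parser.add_argument('--output_path', default='./')
args = parser.parse_args()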