Example No. 1
def main(args):
    dataset = pd.read_csv(args.dataset, index_col=0, keep_default_na=False)

    lookup = {
        "pooled": bert_models.PooledModel,
        "entity": bert_models.EntityModel
    }
    bert_class = lookup[args.bert_model_class]

    bert = bert_class.from_model_checkpoint(args.bert_config_file,
                                            args.bert_weights_file)
    print(bert)

    if len(args.classes) > 0:
        unique_classes = sorted(args.classes)
        dataset = dataset[dataset["PREDICATE"].isin(unique_classes)]
    else:
        unique_classes = sorted(set(dataset["PREDICATE"].values))

    y = utils.get_onehot_from_labels(dataset["PREDICATE_LABEL"])

    assert dataset.shape[0] == y.shape[0]

    labels2preds = {i: lab for (i, lab) in enumerate(unique_classes)}
    print(labels2preds)
    docs = dataset["SENTENCE"].values
    scores = bert.predict(docs, predict_classes=False).numpy()
    utils.save_predictions(dataset, scores, args.outfile, labels2preds)
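
The utils.save_predictions helper used throughout these examples is project-specific and not shown. A minimal sketch consistent with this particular call (a DataFrame, a per-class score matrix, an output path, and an index-to-label map) could look like the following; the column naming and CSV format are assumptions, not the project's actual implementation.

import pandas as pd

def save_predictions(dataset, scores, outfile, labels2preds):
    # Attach one score column per class (hypothetical naming), then write to CSV.
    out = dataset.copy()
    for idx, label in labels2preds.items():
        out["PROB_" + str(label)] = scores[:, idx]
    out.to_csv(outfile)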
Example No. 2
def evaluate(model_type, ckpt_weights_file, eval_res_folder, x_data_path,
             y_data_path, batch_size, max_seq_length, vocab_size, n_classes,
             embed_dim, emb_trainable, model_name, rnn_unit_type, loss_type,
             hidden_dim, hidden_activation, out_activation, bidirectional,
             learning_rate, verbose):
    x_data, y_data = utils.create_model_input_data(x_data_path, y_data_path,
                                                   max_seq_length)
    if model_type == "rnn":
        if y_data is not None:
            loss = evaluate_rnn_model(
                ckpt_weights_file, x_data, y_data, batch_size, max_seq_length,
                vocab_size, n_classes, embed_dim, emb_trainable, model_name,
                rnn_unit_type, loss_type, hidden_dim, hidden_activation,
                out_activation, bidirectional, learning_rate, verbose)
        predictions = predict_rnn_model(ckpt_weights_file, x_data, batch_size,
                                        max_seq_length, vocab_size, n_classes,
                                        embed_dim, emb_trainable, model_name,
                                        rnn_unit_type, loss_type, hidden_dim,
                                        hidden_activation, out_activation,
                                        bidirectional, learning_rate, verbose)
        utils.save_predictions(eval_res_folder, predictions)
    elif model_type == "fnn":
        if y_data is not None:
            loss = evaluate_ffn_model()
        predictions = predict_ffn_model()
        utils.save(predictions)
    else:
        raise ValueError(
            "Unknown model type. The supported types are: rnn|ffn")
    print("Loss: %.3f" % loss)
Example No. 3
File: main.py Project: rjfs/kaggle
def main(data_path, model):
    if model == 'char-gram':
        mod = chargram_cnn.CharGramCNN()
    elif model == 'nb-svm':
        mod = nblr.NaiveBayes()
    elif model == 'random-forest':
        mod = random_forest.ToxicRandomForest()
    else:
        raise ValueError("Unknown model: '%s'" % model)

    # Load data
    logger.info('Loading data...')
    mod.load_data(data_path)
    # Fit model
    logger.info('Fitting model...')
    mod.fit()
    # Generate predictions df
    logger.info('Generating training predictions...')
    train_preds = mod.train_predictions()
    logger.info('Generating validation predictions...')
    val_preds = mod.validation_predictions()
    logger.info('Generating test predictions...')
    test_preds = mod.test_predictions()
    # Save predictions to files
    logger.info('Saving to files...')
    utils.save_predictions(data_path, train_preds, val_preds, test_preds,
                           model)
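
Here utils.save_predictions receives the data path, one prediction set per split, and the model name. A plausible sketch, assuming each prediction set is a pandas DataFrame (or None) indexed by sample id, might be:

import os

def save_predictions(data_path, train_preds, val_preds, test_preds, model):
    # Write one CSV per split into a predictions/ subfolder (assumed layout).
    out_dir = os.path.join(data_path, 'predictions')
    os.makedirs(out_dir, exist_ok=True)
    splits = (('train', train_preds), ('val', val_preds), ('test', test_preds))
    for split, preds in splits:
        if preds is not None:
            preds.to_csv(os.path.join(out_dir, '%s-%s.csv' % (model, split)))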
Example No. 4
def main(
    source_dirs,
    target_dir,
    averaging,
):
    source_dir_paths = [
        os.path.join(config.PREDICTIONS_PATH, dir_) for dir_ in source_dirs
    ]
    target_dir_path = os.path.join(config.PREDICTIONS_PATH, target_dir)
    os.makedirs(target_dir_path, exist_ok=True)

    print('source directories:\n' + '\n'.join(source_dir_paths) + '\n')
    print('target directory:\n' + target_dir_path)

    mean = np.mean if averaging == 'mean' else gmean

    dirs_filenames = [os.listdir(dir_) for dir_ in source_dir_paths]
    assert dirs_filenames
    filenames = dirs_filenames[0]
    for other_filenames in dirs_filenames[1:]:
        assert set(filenames) == set(other_filenames)

    for filename in tqdm(filenames):
        predictions = []
        for prediction_row in zip(
            *(
                npz_file_iterator(os.path.join(dir_path, filename))
                for dir_path in source_dir_paths
            ),
        ):
            prediction = mean(np.vstack(prediction_row), axis=0)
            predictions.append(prediction)
        save_predictions(predictions, os.path.join(target_dir_path, filename))
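
Both npz_file_iterator and save_predictions are project utilities that are not shown here. Under the assumption that each .npz file stores one array per prediction row, minimal stand-ins could look like this:

import numpy as np

def npz_file_iterator(path):
    # Yield the stored arrays one at a time, in the order they were saved.
    with np.load(path) as data:
        for key in data.files:
            yield data[key]

def save_predictions(predictions, path):
    # Store each prediction row as a separate array in one compressed .npz file.
    np.savez_compressed(path, *predictions)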
def evaluate_ffn(params):
    x_data, y_data = utils.load_data(params["eval_x_data"],
                                     params["eval_y_data"])
    fnn_model = model.FFNModel(input_size=x_data.shape[1],
                               output_size=params["output_size"],
                               model_name=params["model_name"],
                               hidden_activation=params["hidden_activation"],
                               out_activation=params["out_activation"],
                               hidden_dims=params["hidden_dims"],
                               layers=params["layers"],
                               kernel_initializer=params["kernel_initializer"],
                               kernel_regularizer=params["kernel_regularizer"],
                               dropouts=[0.0])
    utils.load_model(params["eval_weights_ckpt"], fnn_model,
                     params["learning_rate"])
    print("Model from checkpoint %s was loaded." % params["eval_weights_ckpt"])
    if y_data is not None:
        metrics_names, scores = fnn_model.evaluate(
            x_data,
            y_data,
            batch_size=params["batch_size"],
            verbose=params["verbose"])
        loss = scores[0]
        print("Evaluation loss: %.3f" % loss)
    predictions = fnn_model.predict(x_data,
                                    batch_size=params["batch_size"],
                                    verbose=params["verbose"])
    utils.save_predictions(predictions, params["eval_res_folder"],
                           fnn_model.model_name + "_predictions.txt")
Example No. 6
def test_end(self, step_outputs):
    total_loss, total_predictions, total_labels = [], [], []
    total_unique_ids, total_prediction_probs = [], []
    for x in step_outputs:
        total_loss.append(x["loss"])
        total_predictions.append(x["predictions"])
        total_labels.append(x["labels"])
        total_unique_ids.append(x["unique_ids"])
        total_prediction_probs.append(x["prediction_probs"])
    avg_loss = torch.stack(total_loss).double().mean()
    unique_ids = torch.cat(total_unique_ids).long()
    predictions = torch.cat(total_predictions).long()
    prediction_probs = torch.cat(total_prediction_probs, dim=0).double()
    labels = torch.cat(total_labels).long()
    correct = predictions.eq(labels.view_as(predictions)).long()
    accuracy = correct.double().mean()
    save_predictions(self.hparams.output_path,
                     unique_ids.data.cpu().numpy(),
                     predictions.data.cpu().numpy(),
                     labels.data.cpu().numpy(),
                     correct.cpu().numpy(), [
                         prediction_probs[:, i].data.cpu().numpy()
                         for i in range(self.bert_classifier.label_size)
                     ], f"{self.bert_classifier.name}-test")
    return {
        "loss": avg_loss,
        "progress_bar": {
            "test_loss": avg_loss,
            "test_accuracy": accuracy
        },
        "log": {
            "test_loss": avg_loss,
            "test_accuracy": accuracy
        }
    }
def test_model(
        model: nn.Module,
        dataloaders: Dict[str, torch.utils.data.dataloader.DataLoader],
        device: torch.device,
        save_pred_path: Union[None, str] = None,
        is_inception: bool = False
    ) -> None:

    # Set model to evaluate mode
    model.eval()

    # Initialize predictions list
    predictions = []

    # class_to_idx dict (class: idx) and reverse
    class_to_idx = dataloaders['train'].dataset.class_to_idx
    idx_to_class = {idx: cls for cls, idx in class_to_idx.items()}

    # Iterate over data.
    progress_bar = tqdm(dataloaders['test'], desc='Test: ')

    for i, (inputs, paths) in enumerate(progress_bar):
        inputs = inputs.to(device)

        with torch.no_grad():
            outputs = model(inputs)
            _, preds = torch.max(outputs, 1)
        
        # accumulate the predictions after each batch
        assert len(preds.shape) == 1
        predictions.extend(list(zip(paths, preds.tolist())))

    # save predictions
    if save_pred_path is not None:
        save_predictions(save_pred_path, predictions, idx_to_class)
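
The save_predictions helper here gets a file path, a list of (image path, class index) pairs, and the idx_to_class mapping. One plausible sketch, assuming a simple CSV output, is:

import csv

def save_predictions(save_pred_path, predictions, idx_to_class):
    # predictions is a list of (image_path, class_index) pairs.
    with open(save_pred_path, 'w', newline='') as f:
        writer = csv.writer(f)
        writer.writerow(['path', 'predicted_class'])
        for path, idx in predictions:
            writer.writerow([path, idx_to_class[idx]])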
def evaluate_rnn(params):
    x_data, y_data = utils.load_data(params["eval_x_data"],
                                     params["eval_y_data"])
    rnn_model = model.RNNModel(
        max_seq_length=x_data.shape[1],
        input_size=params["input_size"],
        output_size=params["output_size"],
        embed_dim=params["embed_dim"],
        emb_trainable=params["emb_trainable"],
        model_name=params["model_name"],
        hidden_activation=params["hidden_activation"],
        out_activation=params["out_activation"],
        hidden_dim=params["hidden_dims"][0],
        kernel_initializer=params["kernel_initializer"],
        kernel_regularizer=params["kernel_regularizer"],
        recurrent_regularizer=params["recurrent_regularizer"],
        input_dropout=0.0,
        recurrent_dropout=0.0,
        rnn_unit_type=params["rnn_unit_type"],
        bidirectional=params["bidirectional"],
        attention=params["attention"],
        embs_matrix=params["embs_matrix"])
    utils.load_model(params["eval_weights_ckpt"], rnn_model,
                     params["learning_rate"])
    print("Model from checkpoint %s was loaded." % params["eval_weights_ckpt"])
    # if y_data is not None:
    #     metrics_names, scores = rnn_model.evaluate(x_data, y_data, batch_size=params["batch_size"], verbose=params["verbose"])
    #     loss = scores[0]
    #     print("Evaluation loss: %.3f"%loss)
    sample_idxs = np.random.randint(x_data.shape[0],
                                    size=params["attn_sample_size"])
    x_data_sample = x_data[sample_idxs, :]
    with open(os.path.join(params["eval_res_folder"], "sample_idxs.pickle"),
              "wb") as f:
        cPickle.dump(sample_idxs, f)
    if params["attention"]:
        attention_weights = get_attention_weights(rnn_model, x_data_sample)
        print("Attention weights shape: ", attention_weights.shape)
        with open(os.path.join(params["eval_res_folder"],
                               "attn_weights.pickle"), "wb") as f:
            cPickle.dump(attention_weights, f)
        # Pickled files must be opened in binary mode.
        with open(params["vocab_file"], "rb") as f:
            vocab = cPickle.load(f)
        if params["plot_attn"]:
            inverse_vocab = {value: key for key, value in vocab.items()}
            utils.plot_attention_weights(x_data_sample,
                                         attention_weights,
                                         inverse_vocab,
                                         params["eval_res_folder"],
                                         ids=sample_idxs)

    predictions = rnn_model.predict(x_data,
                                    batch_size=params["batch_size"],
                                    verbose=params["verbose"])
    utils.save_predictions(predictions, params["eval_res_folder"],
                           rnn_model.model_name + "_predictions.txt")
def predict_test(x_test):
    y_test_raw = model.predict(x_test, verbose=1)
    if softmax_activation:
        y_test = np.argmax(y_test_raw, axis=1)
    else:
        # ordinal outputs: count thresholded units to recover the class index
        y_test = (y_test_raw > 0.5).astype(int).sum(axis=1) - 1

    save_predictions(y_test, files, save_name=output_csv)
Example No. 10
def test_end(self, step_outputs):
    total_loss, total_predictions, total_labels = [], [], []
    total_unique_ids, total_prediction_probs = [], []
    for x in step_outputs:
        total_loss.append(x["loss"])
        total_predictions.append(x["predictions"])
        total_labels.append(x["labels"])
        total_unique_ids.append(x["unique_ids"])
        total_prediction_probs.append(x["prediction_probs"])
    avg_loss = torch.stack(total_loss).double().mean()
    unique_ids = torch.cat(total_unique_ids).long()
    predictions = torch.cat(total_predictions).long()
    prediction_probs = torch.cat(total_prediction_probs, dim=0).double()
    labels = torch.cat(total_labels).long()
    correct = predictions.eq(labels.view_as(predictions)).long()
    real_token_indices = (
        labels.flatten() !=
        BertTokenClassificationDataset.POS_IGNORE_LABEL_IDX
    ).nonzero().flatten()
    accuracy = predictions.flatten().index_select(
        0, real_token_indices).eq(labels.flatten().index_select(
            0, real_token_indices)).double().mean()
    predictions_data, labels_data, correct_data = list(), list(), list()
    prediction_probs_data = [[] for i in range(self.num_labels)]
    for pred, l, c, prob in zip(predictions.cpu(), labels.cpu(),
                                correct.cpu(), prediction_probs.cpu()):
        real_token_indices = (
            l.flatten() !=
            BertTokenClassificationDataset.POS_IGNORE_LABEL_IDX
        ).nonzero().flatten()
        predictions_data.append(pred.flatten().index_select(
            0, real_token_indices).tolist())
        labels_data.append(l.flatten().index_select(
            0, real_token_indices).tolist())
        correct_data.append(c.flatten().index_select(
            0, real_token_indices).tolist())
        for i in range(self.num_labels):
            prediction_probs_data[i].append(
                prob[:, i].flatten().index_select(
                    0, real_token_indices).tolist())
    save_predictions(self.hparams.output_path,
                     unique_ids.data.cpu().numpy(), predictions_data,
                     labels_data, correct_data, prediction_probs_data,
                     f"{self.hparams.name}-test")
    return {
        "loss": avg_loss,
        "progress_bar": {
            "test_loss": avg_loss,
            "test_accuracy": accuracy
        },
        "log": {
            "test_loss": avg_loss,
            "test_accuracy": accuracy
        }
    }
Example No. 11
def evaluate_single_model(pattern_id,
                          pattern_iter_output_dir,
                          eval_data,
                          dev_data,
                          eval_config,
                          results_dict,
                          dev_result_all,
                          eval_result_all,
                          do_save_logits=False,
                          do_save_predictions=False):
    # if not wrapper:
    wrapper = TransformerModelWrapper.from_pretrained(
        pattern_iter_output_dir)

    eval_result = wrapper.eval(
        eval_data, eval_config.per_gpu_eval_batch_size, eval_config.n_gpu, eval_config.metrics)
    dev_result = wrapper.eval(
        dev_data, eval_config.per_gpu_eval_batch_size, eval_config.n_gpu, eval_config.metrics)

    logger.info(
        "--- RESULT (pattern_id={}) ---".format(pattern_id))
    logger.info("eval results:")
    logger.info(eval_result['scores'])
    logger.info("dev results:")
    logger.info(dev_result['scores'])

    results_dict['eval_set'] = eval_result['scores']
    results_dict['dev_set'] = dev_result['scores']

    for metric, value in eval_result['scores'].items():
        eval_result_all[metric][pattern_id].append(value)

    for metric, value in dev_result['scores'].items():
        dev_result_all[metric][pattern_id].append(value)

    # Flags renamed to do_save_* so they do not shadow the save_logits /
    # save_predictions helper functions called below.
    if do_save_logits:
        save_logits(os.path.join(pattern_iter_output_dir,
                    'eval_logits.txt'), eval_result['logits'])

        save_logits(os.path.join(pattern_iter_output_dir,
                    'dev_logits.txt'), dev_result['logits'])

    if do_save_predictions:
        save_predictions(os.path.join(
            pattern_iter_output_dir, 'eval_predictions.jsonl'), wrapper, eval_result)
        save_predictions(os.path.join(
            pattern_iter_output_dir, 'dev_predictions.jsonl'), wrapper, dev_result)
Example No. 12
def predict_and_save(X, y, z, test, clf, features):
    """Fit the classifier and save test-set predictions as a CSV submission."""
    clf.fit(X, y)
    model_name = str(clf.__class__).split(".")[-1].split("'")[0]

    # make predictions on test data and store the results as csv
    preds = list(clf.predict(test))

    header = [['id', 'votes']]
    result = [[z[index], prediction] for index, prediction in enumerate(preds)]

    filename = "%s_Submission_%s_%s-%s.csv" % (
        model_name, "-".join(features), str(date.today()), str(int(time.time())))
    save_predictions(header + result, filename)
Example No. 13
    def save_files(self, method):
        if method == 'category_weighted_average':
            val_preds = {m.fpath: m.predictions for m in self.models}
            val = self.category_weighted_average(val_preds)
            file_paths = [self.files_dir + f for f in self.files_names]
            test_preds = {
                fp: read_file(fp.replace('-val.out', '-test.out'))
                for fp in file_paths
            }
            test = self.category_weighted_average(test_preds)
            blend = pd.read_csv(blend_file, index_col='id')
            test = 0.6 * test + 0.4 * blend
        else:
            raise NotImplementedError('Not implemented for \'%s\'' % method)

        utils.save_predictions(os.path.dirname(self.trues_file) + '/',
                               train_preds=None,
                               val_preds=val,
                               test_preds=test,
                               model='ensemble')
    def on_epoch_end(self, epoch, logs=None):
        global best_kappa, best_kappa_epoch, val_kappas
        if modelClass.last_activation == "softmax":
            y_pred = self.model.predict(X_val)
            y_pred = np.argmax(y_pred, axis=1)
        else:
            y_pred = self.model.predict(X_val) > 0.5
            y_pred = y_pred.astype(int).sum(axis=1) - 1

        _val_kappa = cohen_kappa_score(y_val, y_pred, weights='quadratic')

        val_kappas.append(_val_kappa)

        print(f"val_kappa: {_val_kappa:.4f}")

        if _val_kappa > best_kappa:
            best_kappa = _val_kappa
            best_kappa_epoch = epoch
            print(
                "Validation Kappa has improved. Predicting and saving model.")
            if modelClass.last_activation == "softmax":
                y_test = self.model.predict(x_test, verbose=(verbose - 2) * -1)
                y_test = np.argmax(y_test, axis=1)
            else:
                y_test = self.model.predict(x_test,
                                            verbose=(verbose - 2) * -1) > 0.5
                y_test = y_test.astype(int).sum(axis=1) - 1

            save_predictions(y_test,
                             file_list,
                             save_name='predictions/{}.csv'.format(model_name))

            self.model.save(os.path.join(model_path, model_name) + '_best.h5')

        pd.DataFrame(val_kappas).to_csv(
            os.path.join(model_path, 'history', model_name) + '-kappa.csv',
            header=False,
            index=False)
        return
Example No. 15
def update_accuracies(images_dir, curr_max_class, num_classes, classifier,
                      accuracies, save_dir, batch_size, shuffle, dataset):
    seen_classes_test_loader = get_data_loader(images_dir,
                                               False,
                                               0,
                                               curr_max_class,
                                               batch_size=batch_size,
                                               shuffle=shuffle,
                                               dataset=dataset)
    seen_probas, seen_top1, seen_top5 = compute_accuracies(
        seen_classes_test_loader, classifier, num_classes)

    print('\nSeen Classes (%d-%d): top1=%0.2f%% -- top5=%0.2f%%' %
          (0, curr_max_class - 1, seen_top1, seen_top5))
    accuracies['seen_classes_top1'].append(float(seen_top1))
    accuracies['seen_classes_top5'].append(float(seen_top5))

    # save accuracies and predictions out
    utils.save_accuracies(accuracies,
                          min_class_trained=0,
                          max_class_trained=curr_max_class,
                          save_path=save_dir)
    utils.save_predictions(seen_probas, 0, curr_max_class - 1, save_dir)
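
utils.save_predictions is called here with a probability matrix, the class range it covers, and an output directory. A minimal sketch, assuming the probabilities are a NumPy array written to a .npy file with the range encoded in the name, might be:

import os
import numpy as np

def save_predictions(probas, min_class, max_class, save_dir):
    # File name encodes the class range covered by the probability matrix (assumed).
    out_path = os.path.join(save_dir, 'preds_%d_%d.npy' % (min_class, max_class))
    np.save(out_path, np.asarray(probas))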
Example No. 16
def main(
    experiment_name,
    dropout_p,
    batch_size,
    num_workers,
    augmentation,
    folds,
):
    transforms = make_augmentation_transforms(augmentation, mode='test')
    test_dataset = dataset.TestDataset(transform=transforms)
    model = make_nasnet_model(
        num_classes=config.NUM_CLASSES,
        dropout_p=dropout_p,
    )
    test_data_loader = DataLoader(
        dataset=test_dataset,
        batch_size=batch_size,
        num_workers=num_workers,
        shuffle=False,
        pin_memory=True,
    )
    test_predictions = np.zeros((folds, len(test_dataset), config.NUM_CLASSES))
    for fold_num in range(folds):
        checkpoint = load_checkpoint(
            f'{experiment_name}_{fold_num}_{folds}_best.pth')
        model.load_state_dict(checkpoint['state_dict'])
        model = model.cuda().eval()
        with torch.no_grad():
            for batch_index, (waves, _) in enumerate(tqdm(test_data_loader)):
                waves = Variable(waves).cuda()
                logits = model(waves)
                probs = F.softmax(logits, dim=1)
                numpy_probs = probs.cpu().data.numpy()
                start_index = batch_index * batch_size
                end_index = start_index + numpy_probs.shape[0]
                test_predictions[fold_num, start_index:end_index] = numpy_probs
    save_predictions(test_predictions, f'{experiment_name}.pkl')
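
save_predictions here receives the stacked per-fold prediction array and a .pkl target, which suggests a plain pickle dump; a sketch under that assumption:

import pickle

def save_predictions(predictions, path):
    # Serialize the folds x samples x classes array with pickle.
    with open(path, 'wb') as f:
        pickle.dump(predictions, f)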
    def store_prediction(self, sess, batch_x, batch_y, masks, name):
        prediction = sess.run(self.net.predictor,
                              feed_dict={
                                  self.net.x: batch_x,
                                  self.net.y: batch_y,
                                  self.net.keep_prob: 1.
                              })
        loss = sess.run(self.net.cost,
                        feed_dict={
                            self.net.x: batch_x,
                            self.net.y: batch_y,
                            self.net.keep_prob: 1.
                        })

        logging.info("Validaiton loss = {:.4f}".format(loss))

        prediction_folder = os.path.join(self.prediction_path, name)

        if os.path.exists(prediction_folder):
            shutil.rmtree(prediction_folder, ignore_errors=True)

        os.mkdir(prediction_folder)
        utils.save_predictions(batch_x, batch_y, prediction, masks,
                               prediction_folder)
Example No. 18
    def evaluate_query_set(self, query_set_dict, has_masks, distance_eq):
        gt = query_set_dict["gt"]
        predictions = []

        for file, im in query_set_dict["images"].items():

            mask = None
            if has_masks:
                mask_filename = os.path.join(
                    self.output_folder,
                    file.split("/")[-1].split(".")[0] + ".png",
                )
                im, mask = mask_background(im)
                gt_mask = query_set_dict["masks"][file.replace("jpg", "png")]
                self.calc_mask_metrics(gt_mask[..., 0] / 255, mask / 255)
                if self.opt.save:
                    save_mask(mask_filename, mask)

            fv = self.calc_FV_query(im, mask)
            distances = calculate_distances(self.feature_vector_protoypes, fv,
                                            distance_eq)

            predictions.append(list(distances.argsort()[:10]))

        if self.opt.save:
            save_predictions(
                os.path.join(
                    self.output_folder,
                    "result_{}.pkl".format(int(has_masks) + 1),
                ),
                predictions,
            )

        map_k = mapk(gt, predictions)

        return map_k
def doc_classification(
    task_config,
    model_name_or_path,
    cache_dir,
    data_dir,
    save_dir,
    model_dir,
    run_name="0",
    lr=1e-05,
    warmup_steps=5000,
    balance_classes=True,
    embeds_dropout=0.1,
    epochs=200,  # large because we use early stopping by default
    batch_size=20,
    grad_acc_steps=1,
    early_stopping_metric="roc_auc",
    early_stopping_mode="max",
    early_stopping_patience=10,
    model_class="Bert",
    tokenizer_class="BertTokenizer",
    do_lower_case=False,
    do_train=True,
    do_eval=True,
    do_hpo=False,
    print_preds=False,
    print_dev_preds=False,
    max_seq_len=512,
    seed=11,
    eval_every=500,
    use_amp=False,
    use_cuda=True,
):
    # Load task config
    with open(task_config) as config_file:
        task_config = yaml.safe_load(config_file)

    data_dir = data_dir
    save_dir = save_dir
    model_dir = model_dir

    # Create label list from args list or (for large label lists) create from file by splitting by space
    if isinstance(task_config["data"]["label_list"], list):
        label_list = task_config["data"]["label_list"]
    else:
        with open(data_dir / 'labels' /
                  task_config["data"]["label_list"]) as code_file:
            label_list = code_file.read().split(" ")

    # Register Outcome Metrics
    register_task_metrics(label_list)

    # General Settings
    set_all_seeds(seed=seed)
    device, n_gpu = initialize_device_settings(use_cuda=use_cuda,
                                               use_amp=use_amp)

    # 1.Create a tokenizer
    tokenizer = Tokenizer.load(
        pretrained_model_name_or_path=model_name_or_path,
        tokenizer_class=tokenizer_class,
        do_lower_case=do_lower_case)

    # 2. Create a DataProcessor that handles all the conversion from raw text into a pytorch Dataset
    processor = TextClassificationProcessor(
        tokenizer=tokenizer,
        max_seq_len=max_seq_len,
        data_dir=data_dir,
        label_list=label_list,
        metric=task_config["metric"],
        multilabel=task_config["multilabel"],
        train_filename=task_config["data"]["train_filename"],
        dev_filename=task_config["data"]["dev_filename"],
        dev_split=task_config["data"]["dev_split"]
        if "dev_split" in task_config["data"] else None,
        test_filename=task_config["data"]["test_filename"],
        delimiter=task_config["data"]["parsing"]["delimiter"],
        quote_char=task_config["data"]["parsing"]["quote_char"],
        label_column_name=task_config["data"]["parsing"]["label_column"])

    # 3. Create a DataSilo that loads several datasets (train/dev/test), provides DataLoaders for them and calculates a
    #    few descriptive statistics of our datasets
    data_silo = DataSilo(processor=processor,
                         caching=True,
                         cache_path=Path(cache_dir),
                         batch_size=batch_size)

    if do_train:

        # Setup MLFlow logger
        ml_logger = MLFlowLogger(tracking_uri=task_config["log_dir"])
        ml_logger.init_experiment(
            experiment_name=task_config["experiment_name"],
            run_name=f'{task_config["experiment_name"]}_{run_name}')

        # 4. Create an AdaptiveModel
        # a) which consists of a pretrained language model as a basis
        language_model = LanguageModel.load(model_name_or_path,
                                            language_model_class=model_class)

        # b) and a prediction head on top that is suited for our task

        # Define class weights
        if balance_classes:
            class_weights = data_silo.calculate_class_weights(
                task_name=task_config["task_type"])
        else:
            class_weights = None

        # Create Multi- or Single-Label Classification Heads
        if task_config["multilabel"]:

            prediction_head = MultiLabelTextClassificationHead(
                class_weights=class_weights, num_labels=len(label_list))

        else:
            prediction_head = ExtendedTextClassificationHead(
                class_weights=class_weights, num_labels=len(label_list))

        model = ExtendedAdaptiveModel(
            language_model=language_model,
            prediction_heads=[prediction_head],
            embeds_dropout_prob=embeds_dropout,
            lm_output_types=[task_config["output_type"]],
            device=device)

        # 5. Create an optimizer
        schedule_opts = {
            "name": "LinearWarmup",
            "num_warmup_steps": warmup_steps
        }

        model, optimizer, lr_schedule = initialize_optimizer(
            model=model,
            learning_rate=lr,
            device=device,
            n_batches=len(data_silo.loaders["train"]),
            n_epochs=epochs,
            use_amp=use_amp,
            grad_acc_steps=grad_acc_steps,
            schedule_opts=schedule_opts)

        # 6. Create an early stopping instance
        early_stopping = None
        if early_stopping_mode != "none":
            early_stopping = EarlyStopping(mode=early_stopping_mode,
                                           min_delta=0.0001,
                                           save_dir=model_dir,
                                           metric=early_stopping_metric,
                                           patience=early_stopping_patience)

        # 7. Feed everything to the Trainer, which takes care of training the model
        # and evaluates it from time to time

        trainer = ExtendedTrainer(model=model,
                                  optimizer=optimizer,
                                  data_silo=data_silo,
                                  epochs=epochs,
                                  n_gpu=n_gpu,
                                  lr_schedule=lr_schedule,
                                  evaluate_every=eval_every,
                                  early_stopping=early_stopping,
                                  device=device,
                                  grad_acc_steps=grad_acc_steps,
                                  evaluator_test=do_eval)

        def score_callback(eval_score, train_loss):
            tune.report(roc_auc_dev=eval_score, train_loss=train_loss)

        # 8. Train the model
        trainer.train(score_callback=score_callback if do_hpo else None)

        # 9. Save model if not saved in early stopping
        model.save(model_dir + "/final_model")
        processor.save(model_dir + "/final_model")

    if do_eval:
        # Load newly trained model or existing model
        if not do_train:
            model_dir = Path(model_name_or_path)

        logger.info("###### Eval on TEST SET #####")

        evaluator_test = ExtendedEvaluator(
            data_loader=data_silo.get_data_loader("test"),
            tasks=data_silo.processor.tasks,
            device=device)

        # Load trained model for evaluation
        model = ExtendedAdaptiveModel.load(model_dir, device)
        model.connect_heads_with_processor(data_silo.processor.tasks,
                                           require_labels=True)

        # Evaluate
        results = evaluator_test.eval(model, return_preds_and_labels=True)

        # Log results
        utils.log_results(results,
                          dataset_name="test",
                          steps=len(evaluator_test.data_loader),
                          save_path=model_dir + "/eval_results.txt")

        if print_preds:
            # Print model test predictions
            utils.save_predictions(results,
                                   save_dir=model_dir,
                                   multilabel=task_config["multilabel"])

        if print_dev_preds:
            # Evaluate on dev set, e.g. for threshold tuning
            evaluator_dev = Evaluator(
                data_loader=data_silo.get_data_loader("dev"),
                tasks=data_silo.processor.tasks,
                device=device)
            dev_results = evaluator_dev.eval(model,
                                             return_preds_and_labels=True)
            utils.log_results(dev_results,
                              dataset_name="dev",
                              steps=len(evaluator_dev.data_loader),
                              save_path=model_dir + "/eval_dev_results.txt")

            # Print model dev predictions
            utils.save_predictions(dev_results,
                                   save_dir=model_dir,
                                   multilabel=task_config["multilabel"],
                                   dataset_name="dev")
                sampled = sample_training_data(sampling_percentage,
                                               possible_train_instances,
                                               possible_train_labels)

                train_instances = sampled[0]
                train_gold_labels = sampled[1]

                predictions[dataset_filename][fold][str(
                    sampling_percentage)] = {}
                subpredictions = predictions[dataset_filename][fold][str(
                    sampling_percentage)]

                for strategy_name, strategy in config["generation_strategies"]:
                    subpredictions[strategy_name] = {}

                    for clf_name, base_clf in config["base_classifiers"]:
                        clf_pool = strategy(base_clf(), config["pool_size"])
                        clf_pool.fit(train_instances, train_gold_labels)

                        hard_voting_clf = get_voting_clf(clf_pool)
                        cur_predictions = hard_voting_clf.predict(
                            test_instances)
                        subpredictions[strategy_name][
                            clf_name] = cur_predictions.astype(int).tolist()

                        print "Experiment " + str(exp)
                        exp += 1

    print "Finished experiment"
    save_predictions(predictions)
    print "Stored predictions"
Example No. 21
from models import K_SVM, KernelLogReg, KernelRidgeReg
from utils import get_saved_data, save_predictions, reformat_data

if __name__ == '__main__':
    # Kernels considered
    methods = ['WD_d10', 'SP_k6']

    # Import data
    data, kernels, ID = get_saved_data(methods)

    print('\n\n')
    print('Getting the data.........')
    X_train, y_train, X_val, y_val, X_test, kernels, ID = reformat_data(
        data, kernels, ID)

    print('Fitting the model.....')
    model = KernelLogReg(kernels[0], ID, lambda_=0.1, solver='BFGS')
    model.fit(X_train, y_train)

    print('Making predictions .......')
    print('\n\nAccuracy on validation set 1: {:0.4f}'.format(
        model.score(model.predict(X_val), y_val)))

    # Compute predictions
    y_pred = save_predictions(model, X_test)

    print('\n\nPredictions ok')
    print('------')
    print('Check prediction file Yte.csv in folder')
Example No. 22
def classify_and_save():
    pass_id, test_data = prepare_test_data()
    pred = classify(test_data)
    format_pred = (pred > 0.5).astype(int)
    save_predictions(pass_id, format_pred.reshape(-1))
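
save_predictions is called with passenger ids and binary predictions, which looks like a Kaggle-style submission writer. A hedged sketch, where the column names and default filename are assumptions:

import pandas as pd

def save_predictions(pass_id, predictions, filename='submission.csv'):
    # Column names follow the usual Titanic submission layout (an assumption).
    submission = pd.DataFrame({'PassengerId': pass_id, 'Survived': predictions})
    submission.to_csv(filename, index=False)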
Example No. 23
        filename = filename or 'hexbug-training_video-centroid_data'

    input_data = convert_data_to_matrix(filename)
    boundaries = get_wall_boundaries(input_data)

    # Debugging vs Final code
    if args.debug:
        # 3000 will lead to a corner collision
        # 5000 will lead to an almost perpendicular collision with a flat wall, then steering to the robot's left
        # 8000 will lead to an almost perfect 45 degree ricochet off the bottom wall
        # 11000 will lead to a bunching up in the bottom left corner

        #start_index = 11000 #this will be the frame we start at for our test
        start_index = 2000  # 10000 #this will be the frame we start at for our test  # CLW

        len_data = 3600

        test_data = input_data[start_index:start_index + len_data]
        actual = input_data[start_index + len_data:start_index + len_data +
                            60]  #take the next 60 frames
        past_predictions, OTHER = robot_EKF(test_data)

        predicted = predict_next_frames(OTHER, test_data[-1], boundaries, 60)
        print(predicted)

    else:
        test_data = input_data[:]
        past_predictions, OTHER = robot_EKF(test_data)
        predicted = predict_next_frames(OTHER, test_data[-1], boundaries, 60)
        save_predictions(predicted)
def main():
    # parse command line argument and generate config dictionary
    config = parse_args()
    logger.info(json.dumps(config, indent=2))

    run_config = config['run_config']
    optim_config = config['optim_config']

    # TensorBoard SummaryWriter
    if run_config['tensorboard']:
        writer = SummaryWriter(run_config['outdir'])
    else:
        writer = None

    # set random seed
    seed = run_config['seed']
    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
    epoch_seeds = np.random.randint(np.iinfo(np.int32).max // 2,
                                    size=optim_config['epochs'])

    # create output directory
    outdir = pathlib.Path(run_config['outdir'])
    outdir.mkdir(exist_ok=True, parents=True)

    # save config as json file in output directory
    outpath = outdir / 'config.json'
    with open(outpath, 'w') as fout:
        json.dump(config, fout, indent=2)

    # load data loaders
    train_loader, test_loader = get_loader(config['data_config'])

    # load model
    logger.info('Loading model...')
    model = utils.load_model(config['model_config'])
    n_params = sum([param.view(-1).size()[0] for param in model.parameters()])
    logger.info('n_params: {}'.format(n_params))

    if run_config['fp16'] and not run_config['use_amp']:
        model.half()
        for layer in model.modules():
            if isinstance(layer, nn.BatchNorm2d):
                layer.float()

    device = torch.device(run_config['device'])
    if device.type == 'cuda' and torch.cuda.device_count() > 1:
        model = nn.DataParallel(model)
    model.to(device)
    logger.info('Done')

    train_criterion, test_criterion = utils.get_criterion(
        config['data_config'])

    # create optimizer
    if optim_config['no_weight_decay_on_bn']:
        params = [
            {
                'params': [
                    param for name, param in model.named_parameters()
                    if 'bn' not in name
                ]
            },
            {
                'params': [
                    param for name, param in model.named_parameters()
                    if 'bn' in name
                ],
                'weight_decay':
                0
            },
        ]
    else:
        params = model.parameters()
    optim_config['steps_per_epoch'] = len(train_loader)
    optimizer, scheduler = utils.create_optimizer(params, optim_config)

    # for mixed-precision
    amp_handle = apex.amp.init(
        enabled=run_config['use_amp']) if is_apex_available else None

    # run test before start training
    if run_config['test_first']:
        _, predictions = test(0, model, test_criterion, test_loader,
                              run_config, writer)
        utils.save_predictions(predictions, 0, outdir)

    state = {
        'config': config,
        'state_dict': None,
        'optimizer': None,
        'epoch': 0,
        'accuracy': 0,
        'best_accuracy': 0,
        'best_epoch': 0,
    }
    epoch_logs = []

    for epoch, seed in zip(range(1, optim_config['epochs'] + 1), epoch_seeds):
        np.random.seed(seed)
        # train
        train_log = train(epoch, model, optimizer, scheduler, train_criterion,
                          train_loader, config, writer, amp_handle)

        # test
        test_log, predictions = test(epoch, model, test_criterion, test_loader,
                                     run_config, writer)

        epoch_log = train_log.copy()
        epoch_log.update(test_log)
        epoch_logs.append(epoch_log)
        utils.save_epoch_logs(epoch_logs, outdir)

        # update state dictionary
        state = update_state(state, epoch, epoch_log['test']['accuracy'],
                             model, optimizer)

        # save model
        utils.save_checkpoint(state, outdir)

        # save predictions
        utils.save_predictions(predictions, epoch, outdir)
Example No. 25
        print("Train shape", x_train.shape)
        print("Validation shape", x_val.shape)

        train(x_train,
              y_train,
              x_val,
              y_val,
              n_aug=N_AUG,
              save_path=MODEL_PATH)

    elif MODE == "predict":
        x_test, y_test, slides_test, _, _, _ = load_data(TEST_FEATURES,
                                                         flt=FLT,
                                                         fold=None)
        print("Predict shape", x_test.shape)

        print("Loading model from", MODEL_PATH)
        booster = lgb.Booster(model_file=str(MODEL_PATH))
        pred = booster.predict(x_test)

        y_test_reshaped, pred_reshaped, slides_test_reshaped = reshape(
            y_test, pred, N_AUG, slides=slides_test)
        if y_test_reshaped is not None:
            evaluate(y_test_reshaped, pred_reshaped.mean(axis=1))

        # save
        if PRED_PATH:
            dd = dict(zip(slides_test_reshaped, pred_reshaped))
            save_predictions(dd, PRED_PATH, std=True, do_rescale=False)
Example No. 26
meta_train = None
meta_test = None
for name, meta in classifiers.items():
    if name == 'mnb':
        continue
    p = utils.cross_val_proba(meta, train_preds, ym, 5, hash(name + str(meta)))
    print('Meta', name, f1_score(ym, np.array(p >= 0.5, dtype='l'), average='samples'))
    meta_train = p if meta_train is None else np.hstack((meta_train, p))
    # Test set
    if use_test:
        meta.fit(train_preds, ym)
        pt = meta.predict_proba(test_preds)
        pt = np.transpose(np.array(pt)[:, :, 1]) if isinstance(pt, list) else pt
        meta_test = pt if meta_test is None else np.hstack((meta_test, pt))

#meta = classifiers['svc']
#print 'Meta', f1_score(ym, cross_val_predict(meta, meta_train, ym, 5, n_jobs=5), average='samples')

#if use_test:
#    meta.fit(meta_train, ym)
#    pt = meta.predict(meta_test)
#    utils.save_predictions(idt, pt)

meta = classifiers['svc6']
print('Meta', f1_score(ym, cross_val_predict(meta, train_preds[:, 9:], ym, 5, n_jobs=5), average='samples'))
if use_test:
    meta.fit(train_preds[:, 9:], ym)
    pt = meta.predict(test_preds[:, 9:])
    utils.save_predictions(idt, pt)
Example No. 27
def main(prefix="",
         url_feature="",
         url_pred="",
         url_len="",
         url_weight="",
         batch_size=126,
         max_input_len=30,
         max_sent_length=24,
         embed_size=13,
         acc_range=10,
         sight=1,
         is_classify=0,
         decoder=1,
         decoder_size=4,
         loss='mae',
         context_meaning=1,
         rnn_layer=1):
    # init model
    model = Model(max_input_len=max_input_len,
                  max_sent_len=max_sent_length,
                  embed_size=embed_size,
                  using_bidirection=False,
                  fw_cell="basic",
                  bw_cell="basic",
                  batch_size=batch_size,
                  is_classify=is_classify,
                  use_tanh_prediction=True,
                  target=5 if is_classify else 1,
                  loss=loss,
                  acc_range=acc_range,
                  input_rnn=False,
                  sight=sight,
                  use_decoder=decoder,
                  dvs=decoder_size,
                  rnn_layer=rnn_layer)

    # model.init_data_node()
    tf.reset_default_graph()
    with tf.device('/%s' % p.device):
        model.init_ops()
        saver = tf.train.Saver()

    utils.assert_url(url_feature)
    if url_pred:
        utils.assert_url(url_pred)
        dataset = utils.load_file(url_feature)
        pred = utils.load_file(url_pred, False)
        if is_classify:
            pred = [
                utils.get_pm25_class(round(float(x.replace("\n", ""))))
                for x in pred
            ]
        else:
            pred = [round(float(x.replace("\n", ""))) for x in pred]
        if max_input_len > 1:
            utils.assert_url(url_len)
            data_len = utils.load_file(url_len)
        else:
            data_len = None
        _, test = utils.process_data(dataset,
                                     data_len,
                                     pred,
                                     batch_size,
                                     max_input_len,
                                     max_sent_length,
                                     True,
                                     sight,
                                     context_meaning=context_meaning)
    else:
        test = utils.load_file(url_feature)

    tconfig = tf.ConfigProto(allow_soft_placement=True)

    with tf.Session(config=tconfig) as session:
        init = tf.global_variables_initializer()
        session.run(init)
        # saver = tf.train.import_meta_graph(url_weight + ".meta")
        saver.restore(session, url_weight)
        print('==> running model')
        _, _, preds, lb = model.run_epoch(session, test, shuffle=False)
        preds = [x if x <= 45 else (x + 10) for x in preds]
        lb = lb[0:len(preds)]
        # print('Validation loss: {}'.format(valid_loss))
        # print('Validation accuracy: {}'.format(valid_accuracy))
        # tmp = 'Test validation accuracy: %.4f' % valid_accuracy
        # utils.save_file("test_acc/%s_test_acc.txt" % prefix, tmp, False)
        evaluate(preds, lb, acc_range, is_classify)
        utils.save_predictions(preds, lb, p.test_preds % prefix)
Example No. 28
def main(
    model_name,
    dropout_p,
    experiment_name,
    batch_size,
    save_npz_every_n_batches,
    flip_lr,
    flip_ud,
    crop,
    crop_size,
    crop_location,
    resize_after_crop,
    num_workers,
):
    base_predictions_dir = os.path.join(
        config.PREDICTIONS_PATH,
        experiment_name,
    )
    os.makedirs(base_predictions_dir, exist_ok=True)

    predictions_dir_name = 'npz'
    if flip_lr:
        predictions_dir_name += '_fliplr'

    if flip_ud:
        predictions_dir_name += '_flipud'

    if crop:
        crop_size_width, crop_size_height = crop_size
        predictions_dir_name += (
            f'_crop_{crop_size_width}x{crop_size_height}_{crop_location}')
        if resize_after_crop:
            predictions_dir_name += '_resize_after_crop'

    print(predictions_dir_name)

    predictions_dir = os.path.join(base_predictions_dir, predictions_dir_name)
    os.makedirs(predictions_dir, exist_ok=True)

    model = get_model(
        model_name,
        num_classes=config.NUM_CLASSES,
        dropout_p=dropout_p,
    )
    checkpoint = load_checkpoint(f'{experiment_name}_best.pth')
    model.load_state_dict(checkpoint['state_dict'])
    model = model.cuda().eval()

    if not crop:
        crop_params = None
    else:
        crop_params = crop_size, crop_location, resize_after_crop

    transform = make_test_augmentation_transforms(
        crop_params,
        flip_lr,
        flip_ud,
    )
    test_dataset = dataset.CdiscountTestDataset(
        bson_filepath=config.TEST_BSON_FILE,
        transform=transform,
    )

    test_data_loader = DataLoader(
        dataset=test_dataset,
        batch_size=batch_size,
        shuffle=False,
        num_workers=num_workers,
        pin_memory=True,
    )

    saved_batch_index = 0
    predictions = []
    for batch_index, (image_groups, product_ids, img_nums) in tqdm(
            enumerate(test_data_loader),
            total=len(test_data_loader),
    ):
        images = Variable(image_groups, volatile=True).cuda()
        logits = model(images)
        probs = F.softmax(logits, dim=1)
        numpy_probs = probs.cpu().data.numpy()
        predictions.append(numpy_probs)
        if batch_index != 0 and batch_index % save_npz_every_n_batches == 0:
            save_predictions(
                predictions,
                os.path.join(predictions_dir, f'{batch_index}.npz'),
            )
            saved_batch_index += 1
            predictions = []
    save_predictions(
        predictions,
        os.path.join(predictions_dir, f'{batch_index}.npz'),
    )
Example No. 29
# Load and predict data
normalize = transforms.Normalize(mean=[0.000],
                                 std=[1.000])
test_dataset = HW_Dataset(labels_dir=LABELS_DIR,
                            imgs_dir=IMGS_DIR,
                            data_list=TEST_LIST,
                            transform=transforms.Compose([
                                transforms.ToTensor(),
                                normalize
                            ]))
test_loader = DataLoader(dataset=test_dataset, batch_size=TEST_BATCH_SIZE,
                            shuffle=False, num_workers=0, pin_memory=True)
# switch to evaluate mode (deactivate dropout)
model.eval()
test_loss = 0
num_incorrect = 0
total_str_dist = 0
preds = []
targets = []
with torch.no_grad():
    for batch_idx, (x, y_target) in enumerate(test_loader):
        if batch_idx % LOG_INTERVAL == 0:
            print("performed inference on batch: ", batch_idx)
        x = x.to(device)
        y = model(x)
        y_target = utils.encode_words(y_target, CLASS_NAMES)
        preds.extend(utils.decode_output(y.cpu(), CLASS_NAMES))
        targets.extend(utils.decode_label_words(y_target, CLASS_NAMES))
    # save submission
    utils.save_predictions(SUBMISSION_PATH, TEST_LIST, preds)
                              verbose=1,
                              min_delta=1e-4)
callbacks_list = [logger, reduce_lr, checkpoint]

STEP_SIZE_TRAIN = x_train.shape[0] // BATCH_SIZE

model.fit(x_train,
          y_train,
          batch_size=BATCH_SIZE,
          epochs=EPOCHS,
          validation_data=(x_val, y_val),
          callbacks=callbacks_list,
          class_weight=None,
          verbose=verbose)

# model.save(os.path.join(model_path, model_name) + '.h5')
model = load_model(os.path.join(model_path, model_name) + '_best.h5')

#####
y_test = model.predict(x_test, verbose=(verbose - 2) * -1)
y_test = np.round(np.clip(y_test, 0, 4)).astype(int).ravel()

file_list = pd.read_csv(os.path.join(data_folder, 'Test/test_files.csv'),
                        header=None,
                        squeeze=True).values

os.makedirs('predictions', exist_ok=True)
save_predictions(y_test,
                 file_list,
                 save_name='predictions/{}.csv'.format(model_name))
Example No. 31
def eval_set(loader, gt_correspondences, bbdd_fvs, opt):
    masks_metrics = {"precision": [], "recall": [], "f1": []}
    ious = []
    predictions = []
    set_bboxes = []
    for name, query_image, gt_mask in loader:
        if opt.apply_denoise:
            query_image, Noise_level_before, Noise_level_after, blur_type_last = detect_denoise(
                query_image, opt.blur_type)
        # transform to another color space
        multiple_painting, split_point, bg_mask = detect_paintings(query_image)
        bboxes, bbox_mask = detect_bboxes(query_image)
        res_mask = bg_mask.astype(bool) ^ bbox_mask.astype(
            bool) if loader.detect_bboxes else bg_mask
        if loader.compute_masks:
            if loader.evaluate:
                calc_mask_metrics(masks_metrics, gt_mask / 255, bg_mask)
            if opt.save:
                mask_name = name.split("/")[-1].replace(".jpg", ".png")
                save_mask(
                    os.path.join(opt.output,
                                 loader.root.split("/")[-1], mask_name),
                    res_mask * 255)

        # cropped sets, no need to mask image for retrieval
        if gt_mask is None:
            res_mask = None
        if loader.detect_bboxes:
            set_bboxes.append(bboxes)

        # change colorspace before computing feature vector
        query_image = transform_color(
            query_image, opt.color) if opt.color is not None else query_image
        if multiple_painting and gt_mask is not None:
            im_preds = []
            left_paint = np.zeros_like(res_mask)
            right_paint = np.zeros_like(res_mask)

            left_paint[:, split_point:] = res_mask[:, split_point:]
            right_paint[:, :split_point] = res_mask[:, :split_point]

            res_masks = [left_paint, right_paint]
            for submasks in res_masks:
                query_fv = calc_FV(query_image, opt, submasks).ravel()
                distances = calculate_distances(bbdd_fvs,
                                                query_fv,
                                                mode=opt.dist)
                im_preds.append((distances.argsort()[:10]).tolist())
            predictions.append(im_preds)

        else:
            query_fv = calc_FV(query_image, opt, res_mask).ravel()
            distances = calculate_distances(bbdd_fvs, query_fv, mode=opt.dist)

            predictions.append((distances.argsort()[:10]).tolist())

    if opt.save:
        save_predictions(
            "{}/{}/result.pkl".format(opt.output,
                                      loader.root.split("/")[-1]), predictions)
        save_predictions(
            "{}/{}/text_boxes.pkl".format(opt.output,
                                          loader.root.split("/")[-1]),
            set_bboxes)

    map_k = {
        i: mapk(gt_correspondences, predictions, k=i)
        for i in [10, 3, 1]
    } if loader.evaluate else None
    avg_mask_metrics = averge_masks_metrics(
        masks_metrics) if loader.evaluate else None

    return map_k, avg_mask_metrics