Example No. 1
def val_model(model, criterion):
    dset_sizes = len(val_dataset)
    model.eval()
    running_loss = 0.0
    running_corrects = 0
    cont = 0
    outPre = []
    outLabel = []
    pres_list = []
    labels_list = []
    for data in val_loader:
        inputs, labels, month = data['X']['x'], data['Y'], data['X']['m']
        x = inputs[0]
        labels = labels.type(torch.float).cuda()
        inputs = [x.cuda()]
        outputs = model(inputs)
        loss = criterion(outputs, labels)
        if cont == 0:
            outPre = outputs.data.cpu()
            outLabel = labels.data.cpu()
        else:
            outPre = torch.cat((outPre, outputs.data.cpu()), 0)
            outLabel = torch.cat((outLabel, labels.data.cpu()), 0)
        pres_list += outputs.cpu().numpy().tolist()
        labels_list += labels.data.cpu().numpy().tolist()
        running_loss += loss.item() * outputs.size(0)
        cont += 1
    #
    labels_arr = np.array(labels_list)
    pre_arr = np.array(pres_list)
    val_score = score(labels_arr, pre_arr)
    val_rmse = rmse(labels_arr, pre_arr)
    return val_score, val_rmse
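The score and rmse helpers called at the end are defined elsewhere in this repository and are not shown; a minimal sketch of what they could look like, assuming NumPy arrays of targets and predictions (the names and formulas below are assumptions, not the original implementation):

import numpy as np

def rmse(y_true, y_pred):
    # root mean squared error over all elements
    return float(np.sqrt(np.mean((np.asarray(y_true) - np.asarray(y_pred)) ** 2)))

def score(y_true, y_pred):
    # one common convention: negate the error so that higher is better
    return -rmse(y_true, y_pred)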
Example No. 2
def prediction(f_pred, f_pred_prob, prepare_valid_or_test_batch_image_data, split, iterator, ix_to_word, get_raw_sentences_from_imgid, model_options, prediction_save_n):
  # prediction_sentences -> imgid to sentence as a list
  prediction_sentences = {}
  # prediction_gt_sents -> imgid to list of ground truth sentences
  prediction_gt_sents = {}
  # prediction_log_prob -> imgid to log_prob
  prediction_log_prob = {}
  
  for _, valid_index in iterator:
    imgs, img_ids = prepare_valid_or_test_batch_image_data(valid_index, split)
    pred = f_pred(imgs)
    pred_prob = f_pred_prob(imgs)
    
    for idx, img_id in enumerate(img_ids):
      prediction_sentences[img_id] = [utils.convert_idx_to_sentences(ix_to_word, pred[:, idx])]
      # TODO: need to fix log prob
      prediction_log_prob[img_id] = sum(pred_prob[:, idx])
      prediction_gt_sents[img_id] = get_raw_sentences_from_imgid(img_id)
  
  hypo = {idx:x for idx, x in enumerate(prediction_sentences.values())}  
  ref = {idx:x for idx, x in enumerate(prediction_gt_sents.values())}
  
  if numpy.mod(prediction_save_n, model_options['hypo_save_freq']) == 0:
    save_path = os.path.join(model_options['hypo_save_dir'], 'hypo{0}.pkl'.format(prediction_save_n))
    pickle.dump([hypo, ref], open(save_path, 'wb'), -1)
    print 'Saved hypo to ', os.path.abspath(save_path)
  
  scores = metrics.score(ref, hypo)
  return scores
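In captioning code like this, metrics.score typically expects the reference and hypothesis dictionaries to share the same keys, each mapping to a list of sentence strings; a small made-up illustration of the structures assembled above (the sentences are invented purely for illustration):

# hypothetical shapes of the arguments passed to metrics.score(ref, hypo)
hypo = {0: ['a dog runs on the grass'],
        1: ['a man rides a bicycle']}
ref = {0: ['a dog is running across a field', 'a brown dog runs outside'],
       1: ['a man is riding a bike down the street']}
# each key pairs one generated sentence with its ground-truth sentences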
Example No. 3
def classification_validate(model,
                            val_id_type_list,
                            **kwargs):

    params = dict(kwargs)
    assert 'seed' in params, "Need seed, params = {}".format(params)
    assert 'normalize_data' in params, "Need normalize_data"
    verbose = 1 if 'verbose' not in params else params['verbose']
    save_predictions = False if 'save_predictions' not in params else params['save_predictions']
    save_predictions_id = '' if 'save_predictions_id' not in params else params['save_predictions_id']
    n_classes = len(unique_tags) if 'n_classes' not in params else params['n_classes']

    normalize_data = params['normalize_data']
    if normalize_data:
        assert 'normalization' in params, "Need normalization"
        normalization = params['normalization']
    else:
        normalization = None

    if normalize_data and normalization == '':
        params['normalization'] = 'from_save_prefix'

    val_seq = get_val_imgaug_seq(params['seed'])
    val_gen, val_flow = get_gen_flow(id_type_list=val_id_type_list,
                                     imgaug_seq=val_seq,
                                     test_mode=True, **params)

    y_true_total = np.zeros((len(val_id_type_list), n_classes))
    y_pred_total = np.zeros_like(y_true_total)
    info_total = np.empty((y_true_total.shape[0], ), dtype=object)
    counter = 0
    for x, y_true, info in val_flow:
        if verbose > 0:
            print("-- %i / %i" % (counter, len(val_id_type_list)), info)
        s = y_true.shape[0]
        start = counter * s
        end = min((counter + 1) * s, len(val_id_type_list))
        y_true_total[start:end, :] = y_true
        info_total[start:end] = ['train_' + i[0] for i in info]

        y_pred = model.predict(x)
        y_pred_total[start:end, :] = y_pred

        counter += 1

    if save_predictions:
        df = pd.DataFrame(columns=('image_name',) + tuple(unique_tags))
        df['image_name'] = info_total
        df[unique_tags] = y_pred_total
        df.to_csv(os.path.join(OUTPUT_PATH, 'val_predictions_' + save_predictions_id + '.csv'), index=False)
        if verbose > 0:
            print("Saved predictions with id: %s" % save_predictions_id)

    y_pred_total2 = pred_threshold(y_pred_total)
    total_f2 = score(y_true_total, y_pred_total2)
    total_mae = mean_absolute_error(y_true_total, y_pred_total2)

    if verbose > 0:
        print("Total f2, mae : ", total_f2, total_mae)
    return total_f2, total_mae
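pred_threshold is not shown in this snippet; a plausible sketch, assuming it simply binarizes the per-class probabilities at a fixed cut-off (the 0.5 default is an assumption):

import numpy as np

def pred_threshold(y_pred, threshold=0.5):
    # hypothetical helper: turn class probabilities into 0/1 tag predictions
    return (np.asarray(y_pred) > threshold).astype(np.uint8)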
Example No. 4
    def tune(self, X, y, params, metric, cv=5):
        from sklearn.model_selection import GridSearchCV
        from metrics import score

        gs = GridSearchCV(self, params, cv=cv, scoring=score(metric))
        gs.fit(X, y)

        print(f"Best score: {gs.best_score_}")
        print(f"Best parameters: {gs.best_params_}")
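GridSearchCV accepts a string or a callable for its scoring argument, so score(metric) from the project's metrics module presumably returns one of those; if it were a plain metric function instead, it would have to be wrapped, for example (a sketch of such a wrapper, not the original metrics.score):

from sklearn.metrics import make_scorer, mean_squared_error

def score(metric):
    # hypothetical wrapper: expose a plain metric as a GridSearchCV-compatible scorer
    if metric == 'mse':
        return make_scorer(mean_squared_error, greater_is_better=False)
    raise ValueError('unknown metric: %s' % metric)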
Example No. 5
def check_metrics(hyps, refs):
    # This block of code is extremely ugly
    hyps = hyps + ".txt"
    parser = argparse.ArgumentParser()
    refs_list = os.listdir(refs)
    refs_list = ["%s/%s" % (refs, x) for x in refs_list]
    parser.add_argument(
        "refs", type=argparse.FileType('r'), nargs="+"
    )  # nargs="+":all command-line args present are gathered into a list.
    parser.add_argument("hyps", type=argparse.FileType('r'))
    refs_list.append(hyps)
    args = parser.parse_args(refs_list)

    references, hypotheses = metrics.load_textfiles(args.refs, args.hyps)
    scores = metrics.score(references, hypotheses)
    # scale scores from 0-1 to 0-100
    for k in list(scores):
        scores[k] = scores[k] * 100
    return scores
Example No. 6
def _test_instance_output():
    from skimage.io import imsave
    from metrics import score
    from dataset_reader import CoNSeP
    from performance import OutTime 
    import os

    os.makedirs('output', exist_ok=True)

    prefix = 'output/mlflow_refine_point'

    use_patch_idx = False
    # use_patch_idx = True

    dataset = CoNSeP(download=False)

    for IDX in range(1, 15):
    # for IDX in range(1, 2367):
    # observe_idx = 1
    # for IDX in range(observe_idx, observe_idx + 1):
        # for k in np.linspace(1, 2, 21):
        for k in [2.3]:
            with OutTime():
                res = get_instance_output(True, IDX, k=k, use_patch_idx=use_patch_idx, ckpt="model_01_ckpt_epoch_11", dot_refinement=True)
            # seg, hor, vet = get_output_from_file(IDX, transform=DEFAULT_TRANSFORM, use_patch_idx=use_patch_idx)
            # point_mask = dataset.read_points(IDX, 'test')
            # point_mask = binary_dilation(point_mask, disk(3))
            # point_pred = point_mask * 1
            # res = _get_instance_output(seg, hor, vet, k=k)
            # res = _refine_instance_output(res, point_pred)

            img = get_original_image_from_file(IDX, use_patch_idx=use_patch_idx)

            label = dataset.read_labels(IDX, 'test')[0]
            np.save('{}_{}.npy'.format(prefix, IDX), res)

            img = draw_label_boundaries(img, res.copy())

            imsave('{}_{}.png'.format(prefix, IDX), img)

            s = score(res, label, 'DQ_point')
            
            print(k, s['DQ_point'])
Example No. 7
        params=params
    
    
train_loss=np.array(train_loss)
validation_loss=np.array(validation_loss)

plt.plot(np.arange(len(train_loss))+1,np.log10(train_loss),label='train')
plt.plot(np.arange(len(train_loss))+1,np.log10(validation_loss),label='valid')
plt.xlabel('epoch')
plt.ylabel('log10[loss]')
plt.legend()
plt.show()
#print(f_vect1(params[:,0], inputs),f_vect2(params[:,1], inputs),f_vect3(params[:,2], inputs))

print('species 1:')
score(exact[1,:],f_vect1(params[:,0], inputs_krome))

print('species 2:')
score(exact[2,:],f_vect2(params[:,1], inputs_krome))

print('species 3:')
score(exact[3,:],f_vect3(params[:,2], inputs_krome))

plt.plot(exact[0,:],exact[1,:],label='A_krome',color='red')
plt.plot(exact[0,:],exact[2,:],label='B_krome',color='green')
plt.plot(exact[0,:],exact[3,:],label='C_krome',color='blue')

plt.plot(inputs_krome, f_vect1(params[:,0], inputs_krome),'.', label='A_pred',color='red')
plt.plot(inputs_krome, f_vect2(params[:,1], inputs_krome),'.', label='B_pred',color='green')
plt.plot(inputs_krome, f_vect3(params[:,2], inputs_krome),'.', label='C_pred',color='blue')
plt.legend()
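Here score compares the exact abundance of each species with the fitted curve evaluated at the same points; a minimal sketch of such a helper, assuming it only prints error statistics (name and output format are assumptions):

import numpy as np

def score(y_exact, y_pred):
    # hypothetical helper: report RMSE and maximum absolute error of the fit
    y_exact, y_pred = np.asarray(y_exact), np.asarray(y_pred)
    err = y_pred - y_exact
    print('  rmse = %.3e, max|err| = %.3e' % (np.sqrt(np.mean(err ** 2)), np.max(np.abs(err))))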
Example No. 8
        ],
                                axis=0)
        eval_y = np.concatenate([
            np.load(os.path.join(PATH, 'test_y.npy')),
            np.load(os.path.join(PATH, 'noise_test_y.npy'))
        ],
                                axis=0)

    n_chan = eval_x.shape[-1] // 2
    if config.norm:
        eval_x = minmax_norm_magphase(eval_x)
    eval_x = log_magphase(eval_x)
    eval_y = degree_to_class(eval_y, one_hot=False)

    # 3. predict
    pred_y = model.predict(eval_x)
    if config.verbose:
        print(pred_y[:5])
        print(np.max(pred_y, axis=1))

    n_classes = pred_y.shape[-1]
    pred_y = np.argmax(pred_y, axis=-1)

    print("GROUND TRUTH\n", eval_y)
    print("PREDICTIONS\n", pred_y)

    print("Accuracy:", Accuracy()(eval_y, pred_y).numpy())
    print("SCORE:",
          score(class_to_degree(eval_y), class_to_degree(pred_y)).numpy())
    print(confusion_matrix(eval_y, pred_y))
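degree_to_class and class_to_degree are not included in this snippet; a plausible sketch, assuming the angle labels are quantized into equally spaced classes (the 10-degree resolution is an assumption):

import numpy as np

RESOLUTION = 10  # assumed angular step in degrees

def degree_to_class(degrees, one_hot=False):
    # map angles in degrees to integer class indices
    classes = (np.asarray(degrees) // RESOLUTION).astype(np.int64)
    if one_hot:
        n_classes = int(classes.max()) + 1
        return np.eye(n_classes)[classes]
    return classes

def class_to_degree(classes):
    # map class indices back to the angle at the start of each bin
    return np.asarray(classes) * RESOLUTION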
Example No. 9
# index boundaries that split test_unstable_x by forecast lead time
n_test_samples = [0, 666, 1333, 4000, 6666, 9333, len(test_unstable_x)]
days = ['D+1', 'D+2', 'D+3 - D+6', 'D+7 - D+10', 'D+11 - D+14', 'D+15 - D+18']

# test for different models
print('AUC Scores')
if model_name == 'DCAE':

    # load model
    ae = keras.models.load_model('./{}/DCAE.h5'.format(model_dir))
    encoder = keras.Model(inputs=ae.input,
                          outputs=ae.get_layer('encoded').output)
    decoder = keras.Model(inputs=ae.input,
                          outputs=ae.get_layer('decoded').output)
    y_test_stable_hat = score(ae.predict(test_stable_x), test_stable_x)

    for n_test_i in range(1, len(n_test_samples)):
        y_test_unstable_hat = score(ae.predict(test_unstable_x[n_test_samples[n_test_i-1]:n_test_samples[n_test_i]]), \
                              test_unstable_x[n_test_samples[n_test_i-1]:n_test_samples[n_test_i]])
        true_labels = [0.] * len(y_test_stable_hat) + [1.] * len(
            y_test_unstable_hat)

        fpr, tpr, th = roc_curve(
            true_labels,
            np.concatenate([y_test_stable_hat, y_test_unstable_hat], axis=-1))
        auc_score = auc(fpr, tpr)
        print('{}: {}'.format(days[n_test_i - 1], auc_score))

    # test with all
    y_test_unstable_hat = score(ae.predict(test_unstable_x), test_unstable_x)
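In this autoencoder example, score turns reconstructions into one anomaly score per sample, which then feeds roc_curve; a minimal sketch, assuming a per-sample mean squared reconstruction error (the exact error measure is an assumption):

import numpy as np

def score(reconstructed, original):
    # hypothetical anomaly score: mean squared reconstruction error per sample
    diff = np.asarray(reconstructed) - np.asarray(original)
    return np.mean(diff.reshape(diff.shape[0], -1) ** 2, axis=1)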