Example #1
def printTrainTestPred(model, cnt, trainIn, trainLabel, testIn, testLabel, meta):
    print('Printing train/test predictions:')

    predTrain = data.denormalize(model.predict(trainIn[:cnt]), meta)
    labelTrain = data.denormalize(trainLabel[:cnt], meta)

    trainData = []
    trainCols = []

    for i in range(trainLabel.shape[1]):
        trainData.append(list(predTrain[:,i]))
        trainCols.append('train pred label{}'.format(i))
        trainData.append(list(labelTrain[:,i]))
        trainCols.append('train true label{}'.format(i))

    traindf = pd.DataFrame(np.array(trainData).T, columns=trainCols)
    print(traindf)

    predTest = data.denormalize(model.predict(testIn[:cnt]), meta)
    labelTest = data.denormalize(testLabel[:cnt], meta)

    testData = []
    testCols = []

    for i in range(testLabel.shape[1]):
        testData.append(list(predTest[:,i]))
        testCols.append('test pred label{}'.format(i))
        testData.append(list(labelTest[:,i]))
        testCols.append('test true label{}'.format(i))

    testdf = pd.DataFrame(np.array(testData).T, columns=testCols)
    print(testdf)
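A minimal usage sketch, assuming a trained model plus the normalized arrays and meta statistics from the surrounding pipeline (all names hypothetical):

# Hypothetical call: print the first 10 train/test predictions next to
# the true, denormalized labels.
printTrainTestPred(model, 10, trainIn, trainLabel, testIn, testLabel, meta)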
Example #2
def validate_on_single_img(model, img_path, upscale_factor):
    img = load_img(img_path, upscale_factor)
    w, h = img.size
    size = h // upscale_factor, w // upscale_factor
    x = data.get_input_tranforms(size=size)(img).unsqueeze(0)
    downscaled = data.denormalize(x[0, 0].detach().numpy())
    out_img = model(x).clamp(0)
    out_img = out_img.detach().numpy()
    out_img = data.denormalize(out_img)
    out_img = PIL.Image.fromarray(out_img[0, 0], 'L')
    fig, axs = plt.subplots(1, 3)
    axs[0].imshow(img, cmap='gray', vmin=0, vmax=255)
    axs[1].imshow(downscaled, cmap='gray', vmin=0, vmax=255)
    axs[2].imshow(out_img, cmap='gray', vmin=0, vmax=255)
    plt.show()
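A minimal usage sketch, assuming a trained PyTorch super-resolution model and a hypothetical image path:

# Hypothetical call: plot original, downscaled, and super-resolved
# versions of one grayscale image at 3x upscaling.
model.eval()
with torch.no_grad():
    validate_on_single_img(model, 'images/sample.png', upscale_factor=3)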
Example #3
def evaluate_timeseries_with_label(timeseries, labels, window_size):
    filter_length = 128
    nb_filter = 64
    nb_series = 1
    nb_samples = timeseries.shape[0]

    print('\n\nTimeseries ({} samples by {} series):\n'.format(
        nb_samples, nb_series))
    model = make_timeseries_regressor(window_size=window_size,
                                      filter_length=filter_length,
                                      nb_input_series=nb_series,
                                      nb_outputs=nb_series,
                                      nb_filter=nb_filter)
    print(
        '\n\nModel with input size {}, output size {}, {} conv filters of length {}'
        .format(model.input_shape, model.output_shape, nb_filter,
                filter_length))
    model.summary()

    X = np.atleast_3d(timeseries)
    y = np.atleast_3d(labels)
    print('\nShapes: X: {}, y: {}'.format(X.shape, y.shape))

    test_size = int(0.1 * len(timeseries))
    train_size = int(0.9 * len(timeseries))
    #X_train, X_test, y_train, y_test = X[:-test_size], X[-test_size:], y[:-test_size], y[-test_size:]
    idx = np.random.choice(len(timeseries), len(timeseries), replace=False)
    train_idx = idx[:train_size]
    test_idx = idx[train_size:]
    global eval_data_scale, eval_label_scale
    eval_data_scale = data_scale[test_idx, :]
    eval_label_scale = label_scale[test_idx, :]
    X_train, X_test = X[train_idx, :], X[test_idx, :]
    y_train, y_test = y[train_idx, :], y[test_idx, :]

    #train
    model.fit(X_train,
              y_train,
              epochs=30,
              batch_size=8,
              validation_data=(X_test, y_test))
    model.save_weights('try5_keras4.hd5')

    pred = model.predict(X_test)
    X_test = denormalize(X_test, eval_data_scale, axis=1)
    y_test = denormalize(y_test, eval_label_scale, axis=1)
    pred = denormalize(pred, eval_label_scale, axis=1)
    save_plot(X_test, y_test, pred, 'try5_keras4.out', style='keras')
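A minimal usage sketch on synthetic data; the globals data_scale and label_scale that the function slices per test index are assumed to be set by the surrounding normalization code:

# Hypothetical call: a sine wave as input, the next sample as label.
timeseries = np.sin(np.linspace(0, 20 * np.pi, 2000)).reshape(-1, 1)
labels = np.roll(timeseries, -1, axis=0)
evaluate_timeseries_with_label(timeseries, labels, window_size=50)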
Example #4
def plot_image(epoch, generator, dataloader, dim=(1, 3), figsize=(15, 5)):
    for i, imgs in tqdm(enumerate(dataloader)):
        imgs_hr = Variable(imgs["hr"].type(Tensor))
        imgs_lr = Variable(imgs["lr"].type(Tensor))
        gen_hr = generator(imgs_lr)

        #denormalize input
        imgs_lr = denormalize(imgs_lr)

        # Scaling output
        gen_hr = gen_hr.clamp(0, 1)

        for j in range(imgs_lr.shape[0]):
            batches_done = i * len(dataloader) + j
            psnr_val = psnr_fn(gen_hr[[j]], imgs_hr[[j]]).mean().item()
            ssim_val = ssim_fn(gen_hr[[j]], imgs_hr[[j]]).mean().item()

            file_name = os.path.join(
                args.output_path, 'generated_image_' + str(batches_done) +
                "_" + str(epoch) + "_PSNR : " + str(round(psnr_val, 2)) +
                " SSIM : " + str(round(ssim_val, 2)) + '.png')

            lr_image = F.interpolate(imgs_lr[[j]],
                                     (imgs_hr.shape[2], imgs_hr.shape[3]),
                                     mode='nearest')[0]
            hr_image = imgs_hr[j]
            gen_image = gen_hr[j]

            concat_image = torch.cat((lr_image, gen_image, hr_image), 2)

            save_image(concat_image, file_name)
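A minimal usage sketch, assuming a trained generator, a validation loader, and the module-level args, Tensor, psnr_fn, and ssim_fn that plot_image reads (all names hypothetical):

# Hypothetical call after each epoch: write LR / generated / HR
# comparison strips with PSNR and SSIM baked into the filename.
generator.eval()
with torch.no_grad():
    plot_image(epoch, generator, val_dataloader)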
Example #5
def predict(net, data_iter):
    X = []
    y = []
    p = []
    predict_loss, n = 0.0, 0.0
    i = 0
    for batch in data_iter:
        data, label, batch_size = _get_batch(batch, ctx)
        outputs = [net(d) for d in data]
        losses = [loss(yhat, lbl) for yhat, lbl in zip(outputs, label)]
        predict_loss += sum([l.sum().asscalar() for l in losses])
        n += batch_size
        data = [
            denormalize(D,
                        eval_data_scale[i * batch_size:(i + 1) *
                                        batch_size, :],
                        axis=2) for D in data
        ]
        label = [
            denormalize(D,
                        eval_label_scale[i * batch_size:(i + 1) *
                                         batch_size, :],
                        axis=2) for D in label
        ]
        outputs = [
            denormalize(D,
                        eval_label_scale[i * batch_size:(i + 1) *
                                         batch_size, :],
                        axis=2) for D in outputs
        ]
        outputs = [
            D.reshape((batch_size, 1, sequence_length)) for D in outputs
        ]
        X.append(data[0][0].asnumpy())
        y.append(label[0][0].asnumpy())
        p.append(outputs[0][0].asnumpy())
        i += 1
    print("Cumulative_Loss: %.3f, Predict_Loss: %.3f " %
          (predict_loss, predict_loss / n))
    return X, y, p
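A minimal usage sketch, assuming a trained MXNet network, an evaluation iterator, and the eval_data_scale / eval_label_scale arrays that predict() slices per batch:

# Hypothetical call: collect denormalized inputs, labels, and predictions.
X, y, p = predict(net, eval_iter)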
Example #6
def model_predict(net, data_iter, batch_size, num_channel=1):
    X = []
    y = []
    p = []
    cumulative_loss = 0
    for i, (data, label) in enumerate(data_iter):
        data = data.as_in_context(ctx)
        label = label.as_in_context(ctx)
        if data.shape[0] < batch_size:
            continue
        output = net(data)  # (n,w)
        output = output.reshape((batch_size, sequence_length, 1))
        loss = square_loss(output, label)
        cumulative_loss += nd.mean(loss).asscalar()
        print("iter %d, loss: %e" % (i, cumulative_loss))
        # denormalize returns new arrays, so keep the results
        data = denormalize(
            data, eval_data_scale[i * batch_size:(i + 1) * batch_size, :])
        label = denormalize(
            label, eval_label_scale[i * batch_size:(i + 1) * batch_size, :])
        output = denormalize(
            output, eval_label_scale[i * batch_size:(i + 1) * batch_size, :])
        X.append(np.squeeze(data[0].asnumpy()))
        y.append(np.squeeze(label[0].asnumpy()))
        p.append(output[0].asnumpy())
        if i == 0:
            print(data.shape, label.shape, output.shape)
    print("predict_loss: %e," % (cumulative_loss))
    return X, y, p
Example #7
def plot_image(generators, dataloader, model_names):
    for i, imgs in tqdm(enumerate(dataloader)):
        imgs_hr = Variable(imgs["hr"].type(Tensor))
        imgs_lr = Variable(imgs["lr"].type(Tensor))

        for j in range(imgs_lr.shape[0]):
            batches_done = i * len(dataloader) + j

            file_name = os.path.join(
                args.output_path,
                'generated_image_11_' + str(batches_done) + '.png')
            csv_file_name = os.path.join(
                args.output_path,
                'generated_image_11_' + str(batches_done) + '.csv')

            imgs_lr_1 = imgs_lr.clone()
            lr_image = F.interpolate(denormalize(imgs_lr_1)[[j]],
                                     (imgs_hr.shape[2], imgs_hr.shape[3]),
                                     mode='nearest')[0]
            if args.debug:
                print("LR Shape", lr_image.shape)
            final_image = lr_image.unsqueeze(0)

            list_psnr = []
            list_ssim = []

            for generator in generators:
                gen_hr = generator(imgs_lr)
                gen_hr = gen_hr.clamp(0, 1)
                psnr_val = psnr_fn(gen_hr[[j]], imgs_hr[[j]]).mean().item()
                ssim_val = ssim_fn(gen_hr[[j]], imgs_hr[[j]]).mean().item()
                final_image = torch.cat((final_image, gen_hr[[j]]), 0)
                list_psnr.append(psnr_val)
                list_ssim.append(ssim_val)

            final_image = torch.cat((final_image, imgs_hr[[j]]), 0)

            if args.debug:
                print("Final Shape", final_image.shape)

            grid_image = torchvision.utils.make_grid(final_image, nrow=2)

            if args.debug:
                print("Grid Shape", grid_image.shape)

            save_image(grid_image, file_name)

            for i in range(len(model_names)):
                write_to_csv_file(csv_file_name, [
                    args.dataset_name, model_names[i], list_psnr[i],
                    list_ssim[i]
                ])
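A minimal usage sketch, assuming several trained generators and a shared test loader (all names hypothetical):

# Hypothetical comparison run: grid images plus per-model PSNR/SSIM rows
# appended to a CSV next to each image.
names = ['srgan', 'esrgan', 'bicubic_baseline']
with torch.no_grad():
    plot_image([gen_srgan, gen_esrgan, gen_bicubic], test_dataloader, names)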
Example #8
def discreteClassify(model, input, labels, meta):
    partitioner = PermutationPartitioner(len(input), len(input) // RP['num_partitions'])
    iterations = RP['num_partitions']**2

    metrics = {
        'acc': np.zeros((iterations)),
        'log_loss': np.zeros((iterations)),
        # 'auc': np.zeros((iterations)),
        # 'auc_micro': np.zeros((iterations))
    }

    # first denormalize labels, so we do it only once
    labels = data.denormalize(labels, meta)

    for iteration in range(iterations):
        # print('\titer:\t{}/{}'.format(iteration, iterations))

        part = partitioner.get()

        partIn = input[part]
        partLabels = labels[part]
        partPred = model.predict(partIn, batch_size = RP['batch'])
        binarizedPred = np.zeros((len(partPred), len(partPred[0])))

        """
        for row in range(len(partLabels)):
            for idx, val in enumerate(partLabels[row]):
                if val == 1:
                    sys.stdout.write('{}, '.format(idx))
            for val in partPred[row]:
                sys.stdout.write('{}, '.format(val))
            sys.stdout.write('\n')
            sys.stdout.flush()
        """

        # one-hot encode the argmax of each prediction row
        for i in range(len(partPred)):
            binarizedPred[i][np.argmax(partPred[i])] = 1

        metrics['acc'][iteration] = sk.metrics.accuracy_score(partLabels,
                binarizedPred)
        metrics['log_loss'][iteration] = sk.metrics.log_loss(partLabels,
                binarizedPred)

        '''
        keepVec = []
        for col in range(len(partLabels[0])):
            wasOne = 0
            for row in range(len(partLabels)):
                if partLabels[row][col] == 1:
                    wasOne = 1
                    break
            if wasOne:
                keepVec.append(col)

        cutLabels = np.zeros((len(partLabels), len(keepVec)))
        cutPreds  = np.zeros((len(partLabels), len(keepVec)))
        for idx, keep in enumerate(keepVec):
            for row in range(len(partLabels)):
                cutLabels[row][idx] = partLabels[row][keep]
                cutPreds[row][idx]  = binarizedPred[row][keep]

        metrics['auc'][iteration] = sk.metrics.roc_auc_score(cutLabels,
                cutPreds, average = 'macro')
        metrics['auc_micro'][iteration] = sk.metrics.roc_auc_score(cutLabels,
                cutPreds, average = 'micro')
        '''

    metricsOverall = {
        'acc_avg': np.nanmean(metrics['acc']),
        'acc_std': np.nanstd(metrics['acc']),
        'log_loss_avg': np.nanmean(metrics['log_loss']),
        'log_loss_std': np.nanstd(metrics['log_loss']),
        # 'auc_avg': np.nanmean(metrics['auc']),
        # 'auc_std': np.nanstd(metrics['auc']),
        # 'auc_micro_avg': np.nanmean(metrics['auc_micro']),
        # 'auc_micro_std': np.nanstd(metrics['auc_micro'])
        'auc_avg': None,
        'auc_std': None,
        'auc_micro_avg': None,
        'auc_micro_std': None,
    }

    print('Overall metrics:')
    print('\tACC:\t{0:.3f}\t+/-\t{1:.3f}'.format(metricsOverall['acc_avg'],metricsOverall['acc_std']))
    print('\tLogLos:\t{0:.3f}\t+/-\t{1:.3f}'.format(metricsOverall['log_loss_avg'],metricsOverall['log_loss_std']))
    # print('\tAUC:\t{0:.3f}\t+/-\t{1:.3f}'.format(metricsOverall['auc_avg'],metricsOverall['auc_std']))
    # print('\tAUC Micro:\t{0:.3f}\t+/-\t{1:.3f}'.format(metricsOverall['auc_micro_avg'],metricsOverall['auc_micro_std']))

    return metricsOverall
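A minimal usage sketch, assuming one-hot test labels and the meta statistics returned by the project's normalization step:

# Hypothetical call: accuracy and log loss averaged over
# RP['num_partitions']**2 random partitions.
results = discreteClassify(model, testIn, testLabels, meta)
print(results['acc_avg'], results['log_loss_avg'])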
Example #9
def predict(model, input, labels, meta):
    if RP['edge_prediction']:
        partitioner = PermutationPartitioner(len(input[0]), len(input[0]) // RP['num_partitions'])
    else:
        partitioner = PermutationPartitioner(len(input), len(input) // RP['num_partitions'])
    iterations = RP['num_partitions']**2

    metrics = {
        'r2': np.zeros((labels.shape[1], iterations)),
        'mse': np.zeros((labels.shape[1], iterations)),
        'mae': np.zeros((labels.shape[1], iterations)),
    }

    # first denormalize labels, so we do it only once
    labels = data.denormalize(labels, meta)

    for iteration in range(iterations):
        print('\titer:\t{}/{}'.format(iteration+1, iterations))

        part = partitioner.get()

        if RP['edge_prediction']:
            partIn = [input[0][part],input[1][part]]
        else:
            partIn = input[part]
        partLabelsT = labels[part].T
        partPredT = model.predict(partIn, batch_size = RP['batch']).T

        for i in range(labels.shape[1]):
            metrics['r2'][i][iteration] = computeR2(partPredT[i], partLabelsT[i])
            metrics['mse'][i][iteration] = computeMSE(partPredT[i], partLabelsT[i])
            metrics['mae'][i][iteration] = computeMAE(partPredT[i], partLabelsT[i])

        del partIn
        del partLabelsT
        del partPredT

    metricsPerLabel = {
        'r2_avg': np.nanmean(metrics['r2'], axis = 1),
        'r2_std': np.nanstd(metrics['r2'], axis = 1),
        'mse_avg': np.nanmean(metrics['mse'], axis = 1),
        'mse_std': np.nanstd(metrics['mse'], axis = 1),
        'mae_avg': np.nanmean(metrics['mae'], axis = 1),
        'mae_std': np.nanstd(metrics['mae'], axis = 1),
    }

    metricsOverall = {
        'r2_avg': np.nanmean(metrics['r2']),
        'r2_std': np.nanstd(metrics['r2']),
        'mse_avg': np.nanmean(metrics['mse']),
        'mse_std': np.nanstd(metrics['mse']),
        'mae_avg': np.nanmean(metrics['mae']),
        'mae_std': np.nanstd(metrics['mae']),
    }

    for i,labelName in enumerate(RD['labels']):
        print('{}/{} - {}:'.format(i+1, len(RD['labels']),labelName))
        print('\tR2:\t{0:.3f}\t+/-\t{1:.3f}'.format(metricsPerLabel['r2_avg'][i],metricsPerLabel['r2_std'][i]))
        print('\tMSE:\t{0:.3f}\t+/-\t{1:.3f}'.format(metricsPerLabel['mse_avg'][i],metricsPerLabel['mse_std'][i]))
        print('\tMAE:\t{0:.3f}\t+/-\t{1:.3f}'.format(metricsPerLabel['mae_avg'][i],metricsPerLabel['mae_std'][i]))

    print('Overall metrics:')
    print('\tR2:\t{0:.3f}\t+/-\t{1:.3f}'.format(metricsOverall['r2_avg'],metricsOverall['r2_std']))
    print('\tMSE:\t{0:.3f}\t+/-\t{1:.3f}'.format(metricsOverall['mse_avg'],metricsOverall['mse_std']))
    print('\tMAE:\t{0:.3f}\t+/-\t{1:.3f}'.format(metricsOverall['mae_avg'],metricsOverall['mae_std']))

    return metricsOverall
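A minimal usage sketch, assuming normalized test labels and meta from the same pipeline; for edge prediction the input is a pair of aligned arrays:

# Hypothetical call: partitioned regression evaluation.
overall = predict(model, testIn, testLabels, meta)
print('mean R2: {:.3f}'.format(overall['r2_avg']))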
Example #10
def regularize_state_Soft(states, rel_attrs, stat):
    """
    :param states: B x N x state_dim
    :param rel_attrs: B x N x N x relation_dim
    :param stat: [xxx]
    :return new states: B x N x state_dim
    """
    states_denorm = denormalize([states], [stat[1]], var=True)[0]
    states_denorm_acc = denormalize([states.clone()], [stat[1]], var=True)[0]

    rel_attrs = rel_attrs[0]

    rel_attrs_np = rel_attrs.detach().cpu().numpy()

    def get_rel_id(x):
        return np.where(x > 0)[0][0]

    B, N, state_dim = states.size()
    count = Variable(
        torch.FloatTensor(np.zeros((1, N, 1, 8))).to(states.device))

    for i in range(N):
        for j in range(N):

            if i == j:
                assert get_rel_id(
                    rel_attrs_np[i, j]) % 9 == 0  # rel_attrs[i, j, 0] == 1
                count[:, i, :, :] += 1
                continue

            assert torch.sum(rel_attrs[i, j]) <= 1

            if torch.sum(rel_attrs[i, j]) == 0:
                continue

            if get_rel_id(
                    rel_attrs_np[i, j]) % 9 == 1:  # rel_attrs[i, j, 1] == 1:
                assert get_rel_id(
                    rel_attrs_np[j, i]) % 9 == 2  # rel_attrs[j, i, 2] == 1
                x0 = 1
                y0 = 3
                x1 = 0
                y1 = 2
                idx = 1
            elif get_rel_id(
                    rel_attrs_np[i, j]) % 9 == 2:  # rel_attrs[i, j, 2] == 1:
                assert get_rel_id(
                    rel_attrs_np[j, i]) % 9 == 1  # rel_attrs[j, i, 1] == 1
                x0 = 3
                y0 = 1
                x1 = 2
                y1 = 0
                idx = 2
            elif get_rel_id(
                    rel_attrs_np[i, j]) % 9 == 3:  # rel_attrs[i, j, 3] == 1:
                assert get_rel_id(
                    rel_attrs_np[j, i]) % 9 == 4  # rel_attrs[j, i, 4] == 1
                x0 = 0
                y0 = 1
                x1 = 2
                y1 = 3
                idx = 3
            elif get_rel_id(
                    rel_attrs_np[i, j]) % 9 == 4:  # rel_attrs[i, j, 4] == 1:
                assert get_rel_id(
                    rel_attrs_np[j, i]) % 9 == 3  # rel_attrs[j, i, 3] == 1
                x0 = 1
                y0 = 0
                x1 = 3
                y1 = 2
                idx = 4
            elif get_rel_id(
                    rel_attrs_np[i, j]) % 9 == 5:  # rel_attrs[i, j, 5] == 1:
                assert get_rel_id(
                    rel_attrs_np[j, i]) % 9 == 8  # rel_attrs[j, i, 8] == 1
                x = 0
                y = 3
                idx = 5
            elif get_rel_id(
                    rel_attrs_np[i, j]) % 9 == 8:  # rel_attrs[i, j, 8] == 1:
                assert get_rel_id(
                    rel_attrs_np[j, i]) % 9 == 5  # rel_attrs[j, i, 5] == 1
                x = 3
                y = 0
                idx = 8
            elif get_rel_id(
                    rel_attrs_np[i, j]) % 9 == 6:  # rel_attrs[i, j, 6] == 1:
                assert get_rel_id(
                    rel_attrs_np[j, i]) % 9 == 7  # rel_attrs[j, i, 7] == 1
                x = 1
                y = 2
                idx = 6
            elif get_rel_id(
                    rel_attrs_np[i, j]) % 9 == 7:  # rel_attrs[i, j, 7] == 1:
                assert get_rel_id(
                    rel_attrs_np[j, i]) % 9 == 6  # rel_attrs[j, i, 6] == 1
                x = 2
                y = 1
                idx = 7
            else:
                raise AssertionError("Unknown rel_attr %f" % rel_attrs[i, j])

            if idx < 5:
                # if connected by two points
                x0 *= 2
                y0 *= 2
                x1 *= 2
                y1 *= 2
                count[:, i, :, x0:x0 + 2] += 1
                count[:, i, :, x1:x1 + 2] += 1
                states_denorm_acc[:, i, x0:x0 + 2] += states_denorm[:, j,
                                                                    y0:y0 + 2]
                states_denorm_acc[:, i, x0 + 8:x0 +
                                  10] += states_denorm[:, j, y0 + 8:y0 + 10]
                states_denorm_acc[:, i, x1:x1 + 2] += states_denorm[:, j,
                                                                    y1:y1 + 2]
                states_denorm_acc[:, i, x1 + 8:x1 +
                                  10] += states_denorm[:, j, y1 + 8:y1 + 10]

            else:
                # if connected by a corner
                x *= 2
                y *= 2
                count[:, i, :, x:x + 2] += 1
                states_denorm_acc[:, i, x:x + 2] += states_denorm[:, j,
                                                                  y:y + 2]
                states_denorm_acc[:, i,
                                  x + 8:x + 10] += states_denorm[:, j,
                                                                 y + 8:y + 10]

    states_denorm = states_denorm_acc.view(B, N, 2, state_dim // 2) / count
    states_denorm = states_denorm.view(B, N, state_dim)

    return normalize([states_denorm], [stat[1]], var=True)[0]
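A minimal usage sketch, assuming a predicted state batch and the stat normalization statistics used throughout these examples:

# Hypothetical call inside a rollout step: average the coordinates that
# neighbouring quads share so the soft-body state stays consistent.
states = regularize_state_Soft(states, rel_attrs, stat)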
Example #11
def classify(model, input, labels, meta):
    if RP['edge_prediction']:
        partitioner = PermutationPartitioner(len(input[0]), len(input[0]) // RP['num_partitions'])
    else:
        partitioner = PermutationPartitioner(len(input), len(input) // RP['num_partitions'])
    iterations = RP['num_partitions']**2

    metrics = {
        'acc': np.zeros((labels.shape[1], iterations)),
        'log_loss': np.zeros((labels.shape[1], iterations)),
        'auc': np.zeros((labels.shape[1], iterations)),
        'confusion': np.zeros((labels.shape[1], iterations, 2, 2)),
    }

    # first denormalize labels, so we do it only once
    labels = data.denormalize(labels, meta)

    for iteration in range(iterations):
        print('\titer:\t{}/{}'.format(iteration + 1, iterations))

        part = partitioner.get()

        if RP['edge_prediction']:
            partIn = [input[0][part],input[1][part]]
        else:
            partIn = input[part]
        partLabelsT = labels[part].T
        partPredT = model.predict(partIn, batch_size = RP['batch']).T

        for i in range(labels.shape[1]):
            confusion = computeConfusion(partPredT[i], partLabelsT[i])

            metrics['confusion'][i][iteration] = confusion
            metrics['acc'][i][iteration] = (confusion[0][0]+confusion[1][1]) / confusion.sum()
            metrics['log_loss'][i][iteration] = utility.logloss(partPredT[i],partLabelsT[i],RP['classify_label_neg'],RP['classify_label_pos'])
            metrics['auc'][i][iteration] = computeAUC(partPredT[i], partLabelsT[i])

        del partIn
        del partLabelsT
        del partPredT

    metricsPerLabel = {
        'acc_avg': np.nanmean(metrics['acc'], axis = 1),
        'acc_std': np.nanstd(metrics['acc'], axis = 1),
        'log_loss_avg': np.nanmean(metrics['log_loss'], axis = 1),
        'log_loss_std': np.nanstd(metrics['log_loss'], axis = 1),
        'auc_avg': np.nanmean(metrics['auc'], axis = 1),
        'auc_std': np.nanstd(metrics['auc'], axis = 1)
    }

    metricsOverall = {
        'acc_avg': np.nanmean(metrics['acc']),
        'acc_std': np.nanstd(metrics['acc']),
        'log_loss_avg': np.nanmean(metrics['log_loss']),
        'log_loss_std': np.nanstd(metrics['log_loss']),
        'auc_avg': np.nanmean(metrics['auc']),
        'auc_std': np.nanstd(metrics['auc'])
    }

    for i,labelName in enumerate(RD['labels']):
        print('{}/{} - {}:'.format(i+1, len(RD['labels']),labelName))
        print('\tACC:\t{0:.3f}\t+/-\t{1:.3f}'.format(metricsPerLabel['acc_avg'][i],metricsPerLabel['acc_std'][i]))
        print('\tLogLos:\t{0:.3f}\t+/-\t{1:.3f}'.format(metricsPerLabel['log_loss_avg'][i],metricsPerLabel['log_loss_std'][i]))
        print('\tAUC:\t{0:.3f}\t+/-\t{1:.3f}'.format(metricsPerLabel['auc_avg'][i],metricsPerLabel['auc_std'][i]))

    print('Overall metrics:')
    print('\tACC:\t{0:.3f}\t+/-\t{1:.3f}'.format(metricsOverall['acc_avg'],metricsOverall['acc_std']))
    print('\tLogLos:\t{0:.3f}\t+/-\t{1:.3f}'.format(metricsOverall['log_loss_avg'],metricsOverall['log_loss_std']))
    print('\tAUC:\t{0:.3f}\t+/-\t{1:.3f}'.format(metricsOverall['auc_avg'],metricsOverall['auc_std']))

    return metricsOverall
Example #12
        # rollout and calculate distance to goal
        if args.env == 'Rope':
            state_cur_v = to_var(state_cur, use_gpu)[None, :, :]

            for j in range(step, args.roll_step):
                action_cur = control_v[j:j+1]
                action_cur_normalized = normalize([action_cur], [stat[2]], var=True)[0]
                state_cur_normalized = normalize([state_cur_v], [stat[1]], var=True)[0]

                # print(attr_normalized.size(), state_cur_normalized.size(), action_cur_normalized.size())
                with torch.set_grad_enabled(True):
                    pred = model([attr_normalized, state_cur_normalized, Rr_batch, Rs_batch, Ra_batch],
                                 args.pstep, action=action_cur_normalized)

                stat_vel = stat[1][args.position_dim:, :]
                pred = denormalize([pred], [stat_vel], var=True)[0]
                assert pred.requires_grad
                state_cur_v[:, :, :args.position_dim] = state_cur_v[:, :, :args.position_dim] + pred * args.dt
                state_cur_v[:, :, args.position_dim:] = pred

            loss = criterionMSE(
                state_cur_v[:, :args.n_particle, :args.position_dim],
                state_goal_v[:, :args.n_particle, :args.position_dim])

        elif args.env == 'Box':
            # print('actions', actions)
            # print('latents', latents)
            actions_cur = actions.clone()
            latents_cur = latents.clone()

            for j in range(step, args.roll_step):
Example #13
def get_pred(TMP, WDIR, WSPD):
    return data.denormalize(
        get_prediction([data.normalize_in([TMP, WDIR, WSPD])]))[0][0]
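A minimal usage sketch with hypothetical sensor readings:

# Hypothetical call: one denormalized prediction from temperature,
# wind direction, and wind speed.
pred = get_pred(TMP=18.5, WDIR=270.0, WSPD=4.2)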
Example #15
def eval(idx_rollout, video=True):
    print(f'\n=== Forward Simulation on Example {idx_rollout} ===')

    seq_data = load_data(prepared_names, os.path.join(data_dir, str(idx_rollout) + '.rollout.h5'))
    attrs, states, actions, rel_attrs = [to_var(d.copy(), use_gpu=use_gpu) for d in seq_data]

    seq_data = denormalize(seq_data, stat)
    attrs_gt, states_gt, action_gt = seq_data[:3]

    param_file = os.path.join(data_dir, str(idx_rollout // args.group_size) + '.param')
    param = torch.load(param_file)
    engine.init(param)

    '''
    fit data
    '''
    fit_data = get_more_trajectories(idx_rollout)
    fit_data = [to_var(d, use_gpu=use_gpu) for d in fit_data]
    bs = args.fit_num

    ''' T x N x D (denormalized)'''
    states_pred = states_gt.copy()
    states_pred[1:] = 0

    ''' T x N x D (normalized)'''
    s_pred = states.clone()

    '''
    reconstruct loss
    '''
    attrs_flat = get_flat(fit_data[0])
    states_flat = get_flat(fit_data[1])
    actions_flat = get_flat(fit_data[2])
    rel_attrs_flat = get_flat(fit_data[3])

    g = model.to_g(attrs_flat, states_flat, rel_attrs_flat, args.pstep)
    g = g.view(torch.Size([bs, args.time_step]) + g.size()[1:])

    G_tilde = g[:, :-1]
    H_tilde = g[:, 1:]
    U_tilde = fit_data[2][:, :-1]

    G_tilde = get_flat(G_tilde, keep_dim=True)
    H_tilde = get_flat(H_tilde, keep_dim=True)
    U_tilde = get_flat(U_tilde, keep_dim=True)

    _t = time.time()
    A, B, fit_err = model.system_identify(
        G=G_tilde, H=H_tilde, U=U_tilde, rel_attrs=fit_data[3][:1, 0], I_factor=args.I_factor)
    _t = time.time() - _t

    '''
    predict
    '''

    g = model.to_g(attrs, states, rel_attrs, args.pstep)

    pred_g = None
    for step in range(0, args.time_step - 1):
        # prepare input data

        if step == 0:
            current_s = states[step:step + 1]
            current_g = g[step:step + 1]
            states_pred[step] = states_gt[step]
        else:
            '''current state'''
            if args.eval_type == 'valid':
                current_s = states[step:step + 1]
            elif args.eval_type == 'rollout':
                current_s = s_pred[step:step + 1]

            '''current g'''
            if args.eval_type in {'valid', 'rollout'}:
                current_g = model.to_g(attrs[step:step + 1], current_s, rel_attrs[step:step + 1], args.pstep)
            elif args.eval_type == 'koopman':
                current_g = pred_g

        '''next g'''
        pred_g = model.step(g=current_g, u=actions[step:step + 1], rel_attrs=rel_attrs[step:step + 1])

        '''decode s'''
        pred_s = model.to_s(attrs=attrs[step:step + 1], gcodes=pred_g,
                            rel_attrs=rel_attrs[step:step + 1], pstep=args.pstep)

        pred_s_np_denorm = denormalize([to_np(pred_s)], [stat[1]])[0]

        states_pred[step + 1:step + 2] = pred_s_np_denorm
        d = args.state_dim // 2
        states_pred[step + 1:step + 2, :, :d] = states_pred[step:step + 1, :, :d] + \
                                                args.dt * states_pred[step + 1:step + 2, :, d:]

        s_pred_next = normalize([states_pred[step + 1:step + 2]], [stat[1]])[0]
        s_pred[step + 1:step + 2] = to_var(s_pred_next, use_gpu=use_gpu)

    if video:
        engine.render(states_pred, seq_data[2], param, act_scale=args.act_scale, video=True, image=True,
                      path=os.path.join(args.evalf, str(idx_rollout) + '.pred'),
                      states_gt=states_gt)
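A minimal usage sketch; the rollout index is hypothetical:

# Hypothetical call: roll out example 0 and render the predicted
# trajectory as a video.
eval(0, video=True)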
Example #16
def model_predict(net, data_iter, batch_size):  # signature inferred from the call below
    i = 0
    X = []
    y = []
    p = []
    cumulative_loss = 0
    for data, label in data_iter:
        data = data.as_in_context(ctx, )
        label = label.as_in_context(ctx, )
        if data.shape[0] < batch_size:
            continue
        output = net(data)  # (n,w)
        loss = square_loss(output, label[:, :, 0])
        cumulative_loss += nd.mean(loss).asscalar()
        print("iter %d, loss: %e" % (i, cumulative_loss))
        X.append(np.squeeze(data[0].asnumpy()))
        y.append(np.squeeze(label[0].asnumpy()))
        p.append(output[0].asnumpy())
        if i == 0:
            print(data.shape, label.shape, output.shape)
        i += 1
    print("predict_loss: %e," % (cumulative_loss))
    return X, y, p


X, y, predicted = model_predict(net, eval_iter, batch_size)
# X: (n, w, c)

X = [denormalize(a, data_scale, axis=1) for a in X]
y = [denormalize(a, label_scale, axis=1) for a in y]
predicted = [denormalize(a, label_scale, axis=1) for a in predicted]
save_plot(X, y, predicted, 'try4_ivi.out')
Example #17
def mpc_qp(g_cur,
           g_goal,
           time_cur,
           T,
           rel_attrs,
           A_t,
           B_t,
           Q,
           R,
           node_attrs=None,
           actions=None,
           gt_info=None):
    """
    Model Predictive Control + Quadratic Programming
    :param rel_attrs: N x N x relation_dim
    :param node_attrs: N x attributes_dim
    :return action sequence u: T - 1 x N  x action_dim
    """

    n_obj = engine.num_obj
    constraints = []

    if not args.baseline:
        D = args.g_dim
    else:
        D = g_goal.shape[-1]

    if args.fit_type == 'structured':
        dim_a = args.action_dim
        g = cp.Variable((T * n_obj, D))
        u = cp.Variable(((T - 1) * n_obj, args.action_dim))
        augG = cp.Variable(((T - 1) * n_obj, D * args.relation_dim))
        augU = cp.Variable(
            ((T - 1) * n_obj, args.action_dim * args.relation_dim))

        for t in range(T - 1):
            st_idx = t * n_obj
            ed_idx = (t + 1) * n_obj
            for r in range(args.relation_dim):
                constraints.append(
                    augG[st_idx:ed_idx, r * D:(r + 1) *
                         D] == rel_attrs[:, :, r] @ g[st_idx:ed_idx])
            for r in range(args.relation_dim):
                constraints.append(
                    augU[st_idx:ed_idx, r * dim_a:(r + 1) *
                         dim_a] == rel_attrs[:, :, r] @ u[st_idx:ed_idx])

        cost = 0

        for idx in range(n_obj):
            # constrain the initial g
            constraints.append(g[idx] == g_cur[idx])

            for t in range(1, T):
                cur_idx = t * n_obj + idx
                prv_idx = (t - 1) * n_obj + idx

                zero_normed = -stat[2][:, 0] / stat[2][:, 1]
                act_scale_max_normed = (args.act_scale -
                                        stat[2][:, 0]) / stat[2][:, 1]
                act_scale_min_normed = (-args.act_scale -
                                        stat[2][:, 0]) / stat[2][:, 1]
                constraints.append(u[prv_idx] >= act_scale_min_normed)
                constraints.append(u[prv_idx] <= act_scale_max_normed)

                if args.env == 'Rope':
                    if idx == 0:
                        # first mass: action_y = 0 (no action_y now)
                        pass
                    else:
                        # other mass: action_x = action_y = 0
                        constraints.append(u[prv_idx][:] == zero_normed)

                elif args.env in ['Soft', 'Swim']:
                    if node_attrs[idx, 0] < 1e-6:
                        # if there is no actuation
                        constraints.append(u[prv_idx][:] == zero_normed)
                    else:
                        pass

                constraints.append(g[cur_idx] == A_t @ augG[prv_idx] +
                                   B_t @ augU[prv_idx])
                # penalize large actions
                cost += quad_form(u[prv_idx] - zero_normed, R)
            cost += quad_form(g[(T - 1) * n_obj + idx] - g_goal[idx], Q)

    elif args.fit_type == 'unstructured':

        zero_normed = -stat[2][:, 0] / stat[2][:, 1]

        g = cp.Variable((T, n_obj * args.g_dim))
        u = cp.Variable((T - 1, n_obj * args.action_dim))

        cost = 0

        constraints.append(g[0] == g_cur.ravel())

        for t in range(1, T):

            act_scale_normed = (args.act_scale - stat[2][:, 0]) / stat[2][:, 1]
            act_scale_normed = np.repeat(act_scale_normed, n_obj, 0)
            constraints.append(u[t - 1] >= -act_scale_normed)
            constraints.append(u[t - 1] <= act_scale_normed)

            if args.env == 'Rope':
                # zero the actions on all balls except the first one
                for idx in range(1, n_obj):
                    constraints.append(u[t - 1][idx] == zero_normed)

            elif args.env in ['Soft', 'Swim']:
                for idx in range(0, n_obj):
                    if node_attrs[idx, 0] < 1e-6:
                        constraints.append(
                            u[t - 1][idx * args.action_dim:(idx + 1) *
                                     args.action_dim] == zero_normed)

            constraints.append(g[t] == A_t @ g[t - 1] + B_t @ u[t - 1])

            for i in range(n_obj):
                cost += quad_form(
                    u[t - 1][i * args.action_dim:(i + 1) * args.action_dim] -
                    zero_normed, R)

        for i in range(n_obj):
            cost += quad_form(
                g[T - 1][i * args.g_dim:(i + 1) * args.g_dim] - g_goal[i], Q)

    elif args.fit_type == 'diagonal':

        zero_normed = -stat[2][:, 0] / stat[2][:, 1]

        g = cp.Variable((T, n_obj * args.g_dim))
        u = cp.Variable((T - 1, n_obj * args.action_dim))

        cost = 0
        constraints.append(g[0] == g_cur.ravel())

        for t in range(1, T):
            act_scale_normed = (args.act_scale - stat[2][:, 0]) / stat[2][:, 1]
            act_scale_normed = np.repeat(act_scale_normed, n_obj, 0)
            constraints.append(u[t - 1] >= -act_scale_normed)
            constraints.append(u[t - 1] <= act_scale_normed)
            if args.env == 'Rope':
                # zero the actions on all balls except the first one
                for idx in range(1, n_obj):
                    constraints.append(u[t - 1][idx] == zero_normed)
            elif args.env in ['Soft', 'Swim']:
                for idx in range(0, n_obj):
                    if node_attrs[idx, 0] < 1e-6:
                        constraints.append(
                            u[t - 1][idx * args.action_dim:(idx + 1) *
                                     args.action_dim] == zero_normed)

            for i in range(n_obj):
                t1 = A_t @ g[t - 1][i * args.g_dim:(i + 1) * args.g_dim]
                t2 = B_t @ u[t - 1][i * args.action_dim:(i + 1) *
                                    args.action_dim]
                if args.env == 'Rope':
                    t2 = t2[:, 0]
                constraints.append(g[t][i * args.g_dim:(i + 1) *
                                        args.g_dim] == t1 + t2)
                cost += quad_form(
                    u[t - 1][i * args.action_dim:(i + 1) * args.action_dim] -
                    zero_normed, R)
        for i in range(n_obj):
            cost += quad_form(
                g[T - 1][i * args.g_dim:(i + 1) * args.g_dim] - g_goal[i], Q)

    objective = cp.Minimize(cost)
    prob = cp.Problem(objective, constraints)
    result = prob.solve()

    u_val = u.value
    g_val = g.value
    u = u_val.reshape(T - 1, n_obj, args.action_dim)

    u = denormalize([u], [stat[2]])[0]
    g = g_val.reshape(T, n_obj, D)

    return u
Example #18
def shoot_mpc_qp(roll_idx):
    print(f'\n=== Model Based Control on Example {roll_idx} ===')
    '''
    load data
    '''
    seq_data = load_data(prepared_names,
                         os.path.join(data_dir,
                                      str(roll_idx) + '.rollout.h5'))
    attrs, states, actions, rel_attrs = [
        to_var(d.copy(), use_gpu=use_gpu) for d in seq_data
    ]

    seq_data = denormalize(seq_data, stat)
    attrs_gt, states_gt, actions_gt = seq_data[:3]
    '''
    setup engine
    '''
    param_file = os.path.join(data_dir,
                              str(roll_idx // args.group_size) + '.param')
    param = torch.load(param_file)
    engine.init(param)
    n_obj = engine.num_obj
    '''
    fit koopman
    '''
    print('===> system identification!')
    fit_data = get_more_trajectories(roll_idx)
    fit_data = [to_var(d, use_gpu=use_gpu) for d in fit_data]
    bs = args.fit_num

    attrs_flat = get_flat(fit_data[0])
    states_flat = get_flat(fit_data[1])
    actions_flat = get_flat(fit_data[2])
    rel_attrs_flat = get_flat(fit_data[3])

    g = model.to_g(attrs_flat, states_flat, rel_attrs_flat, args.pstep)
    g = g.view(torch.Size([bs, args.time_step]) + g.size()[1:])

    G_tilde = g[:, :-1]
    H_tilde = g[:, 1:]
    U_left = fit_data[2][:, :-1]

    G_tilde = get_flat(G_tilde, keep_dim=True)
    H_tilde = get_flat(H_tilde, keep_dim=True)
    U_left = get_flat(U_left, keep_dim=True)

    A, B, fit_err = model.system_identify(G=G_tilde,
                                          H=H_tilde,
                                          U=U_left,
                                          rel_attrs=fit_data[3][:1, 0],
                                          I_factor=args.I_factor)
    '''
    shooting
    '''
    print('===> model based control start!')
    # the engine currently cannot be set to a mid-trajectory state
    assert args.roll_start == 0

    start_step = args.roll_start
    g_start_v = model.to_g(attrs=attrs[start_step:start_step + 1],
                           states=states[start_step:start_step + 1],
                           rel_attrs=rel_attrs[start_step:start_step + 1],
                           pstep=args.pstep)
    g_start = to_np(g_start_v[0])

    # the goal horizon is the same for every supported environment
    goal_step = args.roll_step + args.roll_start

    g_goal_v = model.to_g(attrs=attrs[goal_step:goal_step + 1],
                          states=states[goal_step:goal_step + 1],
                          rel_attrs=rel_attrs[goal_step:goal_step + 1],
                          pstep=args.pstep)
    g_goal = to_np(g_goal_v[0])

    states_start = states_gt[start_step]
    states_goal = states_gt[goal_step]
    states_roll = np.zeros((args.roll_step + 1, n_obj, args.state_dim))
    states_roll[0] = states_start

    control = np.zeros((args.roll_step + 1, n_obj, args.action_dim))
    # control_v = to_var(control, use_gpu, requires_grad=True)
    bar = ProgressBar()
    for step in bar(range(args.roll_step)):
        states_input = normalize([states_roll[step:step + 1]], [stat[1]])[0]
        states_input_v = to_var(states_input, use_gpu=use_gpu)
        g_cur_v = model.to_g(attrs=attrs[:1],
                             states=states_input_v,
                             rel_attrs=rel_attrs[:1],
                             pstep=args.pstep)
        g_cur = to_np(g_cur_v[0])
        '''
        setup parameters
        '''
        T = args.roll_step - step + 1

        A_v, B_v = model.A, model.B
        A_t = to_np(A_v[0]).T
        B_t = to_np(B_v[0]).T

        if not args.baseline:
            Q = np.eye(args.g_dim)
        else:
            Q = np.eye(g_goal.shape[-1])

        if args.env == 'Rope':
            R_factor = 0.01
        elif args.env == 'Soft':
            R_factor = 0.001
        elif args.env == 'Swim':
            R_factor = 0.0001
        else:
            assert False

        R = np.eye(args.action_dim) * R_factor
        '''
        generate action
        '''
        rel_attrs_np = to_np(rel_attrs)[0]
        assert args.optim_type == 'qp'
        if step % args.feedback == 0:
            node_attrs = attrs_gt[0] if args.env in ['Soft', 'Swim'] else None
            u = mpc_qp(g_cur,
                       g_goal,
                       step,
                       T,
                       rel_attrs_np,
                       A_t,
                       B_t,
                       Q,
                       R,
                       node_attrs=node_attrs,
                       actions=to_np(actions[step:]),
                       gt_info=[
                           param, states_gt[goal_step:goal_step + 1],
                           attrs[step:step + T], rel_attrs[step:step + T]
                       ])
        else:
            # between feedback steps, reuse the remainder of the last plan
            u = u[1:]
        '''
        execute action
        '''
        engine.set_action(u[0])  # execute the first action
        control[step] = engine.get_action()
        engine.step()
        states_roll[step + 1] = engine.get_state()
    '''
    render
    '''
    engine.render(states_roll,
                  control,
                  param,
                  act_scale=args.act_scale,
                  video=True,
                  image=True,
                  path=os.path.join(args.shootf,
                                    str(roll_idx) + '.shoot'),
                  states_gt=np.tile(states_gt[goal_step:goal_step + 1],
                                    (args.roll_step + 1, 1, 1)),
                  count_down=True,
                  gt_border=True)

    states_result = states_roll[args.roll_step]

    states_goal_normalized = normalize([states_goal], [stat[1]])[0]
    states_result_normalized = normalize([states_result], [stat[1]])[0]

    return norm(states_goal - states_result), (states_goal, states_result,
                                               states_goal_normalized,
                                               states_result_normalized)
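A minimal usage sketch; the rollout index is hypothetical:

# Hypothetical call: run MPC shooting on rollout 3 and report the final
# distance between the achieved and goal states.
dist, (s_goal, s_result, s_goal_n, s_result_n) = shoot_mpc_qp(3)
print('final state distance:', dist)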
Example #19
def predict(model, input, labels, meta):
    if RP['edge_prediction']:
        partitioner = PermutationPartitioner(
            len(input[0]),
            len(input[0]) // RP['num_partitions'])
    else:
        partitioner = PermutationPartitioner(len(input),
                                             len(input) // RP['num_partitions'])
    iterations = RP['num_partitions']**2

    metrics = {
        'r2': np.zeros((labels.shape[1], iterations)),
        'mse': np.zeros((labels.shape[1], iterations)),
        'mae': np.zeros((labels.shape[1], iterations)),
    }

    # first denormalize labels, so we do it only once
    labels = data.denormalize(labels, meta)

    for iteration in range(iterations):
        print('\titer:\t{}/{}'.format(iteration + 1, iterations))

        part = partitioner.get()

        if RP['edge_prediction']:
            partIn = [input[0][part], input[1][part]]
        else:
            partIn = input[part]
        partLabelsT = labels[part].T
        partPredT = model.predict(partIn, batch_size=RP['batch']).T

        for i in range(labels.shape[1]):
            metrics['r2'][i][iteration] = computeR2(partPredT[i],
                                                    partLabelsT[i])
            metrics['mse'][i][iteration] = computeMSE(partPredT[i],
                                                      partLabelsT[i])
            metrics['mae'][i][iteration] = computeMAE(partPredT[i],
                                                      partLabelsT[i])

        del partIn
        del partLabelsT
        del partPredT

    metricsPerLabel = {
        'r2_avg': np.nanmean(metrics['r2'], axis=1),
        'r2_std': np.nanstd(metrics['r2'], axis=1),
        'mse_avg': np.nanmean(metrics['mse'], axis=1),
        'mse_std': np.nanstd(metrics['mse'], axis=1),
        'mae_avg': np.nanmean(metrics['mae'], axis=1),
        'mae_std': np.nanstd(metrics['mae'], axis=1),
    }

    metricsOverall = {
        'r2_avg': np.nanmean(metrics['r2']),
        'r2_std': np.nanstd(metrics['r2']),
        'mse_avg': np.nanmean(metrics['mse']),
        'mse_std': np.nanstd(metrics['mse']),
        'mae_avg': np.nanmean(metrics['mae']),
        'mae_std': np.nanstd(metrics['mae']),
    }

    for i, labelName in enumerate(RD['labels']):
        print('{}/{} - {}:'.format(i + 1, len(RD['labels']), labelName))
        print('\tR2:\t{0:.3f}\t+/-\t{1:.3f}'.format(
            metricsPerLabel['r2_avg'][i], metricsPerLabel['r2_std'][i]))
        print('\tMSE:\t{0:.3f}\t+/-\t{1:.3f}'.format(
            metricsPerLabel['mse_avg'][i], metricsPerLabel['mse_std'][i]))
        print('\tMAE:\t{0:.3f}\t+/-\t{1:.3f}'.format(
            metricsPerLabel['mae_avg'][i], metricsPerLabel['mae_std'][i]))

    print('Overall metrics:')
    print('\tR2:\t{0:.3f}\t+/-\t{1:.3f}'.format(metricsOverall['r2_avg'],
                                                metricsOverall['r2_std']))
    print('\tMSE:\t{0:.3f}\t+/-\t{1:.3f}'.format(metricsOverall['mse_avg'],
                                                 metricsOverall['mse_std']))
    print('\tMAE:\t{0:.3f}\t+/-\t{1:.3f}'.format(metricsOverall['mae_avg'],
                                                 metricsOverall['mae_std']))

    return metricsOverall
Example #20
def discreteClassify(model, input, labels, meta):
    partitioner = PermutationPartitioner(len(input),
                                         len(input) // RP['num_partitions'])
    iterations = RP['num_partitions']**2

    metrics = {
        'acc': np.zeros((iterations)),
        'log_loss': np.zeros((iterations)),
        # 'auc': np.zeros((iterations)),
        # 'auc_micro': np.zeros((iterations))
    }

    # first denormalize labels, so we do it only once
    labels = data.denormalize(labels, meta)

    for iteration in range(iterations):
        # print('\titer:\t{}/{}'.format(iteration, iterations))

        part = partitioner.get()

        partIn = input[part]
        partLabels = labels[part]
        partPred = model.predict(partIn, batch_size=RP['batch'])
        binarizedPred = np.zeros((len(partPred), len(partPred[0])))
        """
        for row in range(len(partLabels)):
            for idx, val in enumerate(partLabels[row]):
                if val == 1:
                    sys.stdout.write('{}, '.format(idx))
            for val in partPred[row]:
                sys.stdout.write('{}, '.format(val))
            sys.stdout.write('\n')
            sys.stdout.flush()
        """

        # one-hot encode the argmax of each prediction row
        for i in range(len(partPred)):
            binarizedPred[i][np.argmax(partPred[i])] = 1

        metrics['acc'][iteration] = sk.metrics.accuracy_score(
            partLabels, binarizedPred)
        metrics['log_loss'][iteration] = sk.metrics.log_loss(
            partLabels, binarizedPred)
        '''
        keepVec = []
        for col in range(len(partLabels[0])):
            wasOne = 0
            for row in range(len(partLabels)):
                if partLabels[row][col] == 1:
                    wasOne = 1
                    break
            if wasOne:
                keepVec.append(col)

        cutLabels = np.zeros((len(partLabels), len(keepVec)))
        cutPreds  = np.zeros((len(partLabels), len(keepVec)))
        for idx, keep in enumerate(keepVec):
            for row in range(len(partLabels)):
                cutLabels[row][idx] = partLabels[row][keep]
                cutPreds[row][idx]  = binarizedPred[row][keep]

        metrics['auc'][iteration] = sk.metrics.roc_auc_score(cutLabels,
                cutPreds, average = 'macro')
        metrics['auc_micro'][iteration] = sk.metrics.roc_auc_score(cutLabels,
                cutPreds, average = 'micro')
        '''

    metricsOverall = {
        'acc_avg': np.nanmean(metrics['acc']),
        'acc_std': np.nanstd(metrics['acc']),
        'log_loss_avg': np.nanmean(metrics['log_loss']),
        'log_loss_std': np.nanstd(metrics['log_loss']),
        # 'auc_avg': np.nanmean(metrics['auc']),
        # 'auc_std': np.nanstd(metrics['auc']),
        # 'auc_micro_avg': np.nanmean(metrics['auc_micro']),
        # 'auc_micro_std': np.nanstd(metrics['auc_micro'])
        'auc_avg': None,
        'auc_std': None,
        'auc_micro_avg': None,
        'auc_micro_std': None,
    }

    print('Overall metrics:')
    print('\tACC:\t{0:.3f}\t+/-\t{1:.3f}'.format(metricsOverall['acc_avg'],
                                                 metricsOverall['acc_std']))
    print('\tLogLos:\t{0:.3f}\t+/-\t{1:.3f}'.format(
        metricsOverall['log_loss_avg'], metricsOverall['log_loss_std']))
    # print('\tAUC:\t{0:.3f}\t+/-\t{1:.3f}'.format(metricsOverall['auc_avg'],metricsOverall['auc_std']))
    # print('\tAUC Micro:\t{0:.3f}\t+/-\t{1:.3f}'.format(metricsOverall['auc_micro_avg'],metricsOverall['auc_micro_std']))

    return metricsOverall
Example #21
def classify(model, input, labels, meta):
    if RP['edge_prediction']:
        partitioner = PermutationPartitioner(
            len(input[0]),
            len(input[0]) // RP['num_partitions'])
    else:
        partitioner = PermutationPartitioner(len(input),
                                             len(input) // RP['num_partitions'])
    iterations = RP['num_partitions']**2

    metrics = {
        'acc': np.zeros((labels.shape[1], iterations)),
        'log_loss': np.zeros((labels.shape[1], iterations)),
        'auc': np.zeros((labels.shape[1], iterations)),
        'confusion': np.zeros((labels.shape[1], iterations, 2, 2)),
    }

    # first denormalize labels, so we do it only once
    labels = data.denormalize(labels, meta)

    for iteration in range(iterations):
        print('\titer:\t{}/{}'.format(iteration + 1, iterations))

        part = partitioner.get()

        if RP['edge_prediction']:
            partIn = [input[0][part], input[1][part]]
        else:
            partIn = input[part]
        partLabelsT = labels[part].T
        partPredT = model.predict(partIn, batch_size=RP['batch']).T

        for i in range(labels.shape[1]):
            confusion = computeConfusion(partPredT[i], partLabelsT[i])

            metrics['confusion'][i][iteration] = confusion
            metrics['acc'][i][iteration] = (confusion[0][0] +
                                            confusion[1][1]) / confusion.sum()
            metrics['log_loss'][i][iteration] = utility.logloss(
                partPredT[i], partLabelsT[i], RP['classify_label_neg'],
                RP['classify_label_pos'])
            metrics['auc'][i][iteration] = computeAUC(partPredT[i],
                                                      partLabelsT[i])

        del partIn
        del partLabelsT
        del partPredT

    metricsPerLabel = {
        'acc_avg': np.nanmean(metrics['acc'], axis=1),
        'acc_std': np.nanstd(metrics['acc'], axis=1),
        'log_loss_avg': np.nanmean(metrics['log_loss'], axis=1),
        'log_loss_std': np.nanstd(metrics['log_loss'], axis=1),
        'auc_avg': np.nanmean(metrics['auc'], axis=1),
        'auc_std': np.nanstd(metrics['auc'], axis=1)
    }

    metricsOverall = {
        'acc_avg': np.nanmean(metrics['acc']),
        'acc_std': np.nanstd(metrics['acc']),
        'log_loss_avg': np.nanmean(metrics['log_loss']),
        'log_loss_std': np.nanstd(metrics['log_loss']),
        'auc_avg': np.nanmean(metrics['auc']),
        'auc_std': np.nanstd(metrics['auc'])
    }

    for i, labelName in enumerate(RD['labels']):
        print('{}/{} - {}:'.format(i + 1, len(RD['labels']), labelName))
        print('\tACC:\t{0:.3f}\t+/-\t{1:.3f}'.format(
            metricsPerLabel['acc_avg'][i], metricsPerLabel['acc_std'][i]))
        print('\tLogLos:\t{0:.3f}\t+/-\t{1:.3f}'.format(
            metricsPerLabel['log_loss_avg'][i],
            metricsPerLabel['log_loss_std'][i]))
        print('\tAUC:\t{0:.3f}\t+/-\t{1:.3f}'.format(
            metricsPerLabel['auc_avg'][i], metricsPerLabel['auc_std'][i]))

    print('Overall metrics:')
    print('\tACC:\t{0:.3f}\t+/-\t{1:.3f}'.format(metricsOverall['acc_avg'],
                                                 metricsOverall['acc_std']))
    print('\tLogLos:\t{0:.3f}\t+/-\t{1:.3f}'.format(
        metricsOverall['log_loss_avg'], metricsOverall['log_loss_std']))
    print('\tAUC:\t{0:.3f}\t+/-\t{1:.3f}'.format(metricsOverall['auc_avg'],
                                                 metricsOverall['auc_std']))

    return metricsOverall
Example #22
                attr, state, Rr, Rs, Ra, n_particles,
                node_r_idx, node_s_idx, pstep,
                instance_idx, phases_dict, args.verbose_model)
            # print('Time forward', time.time() - st_time)

            # print(vels)

            if args.debug:
                data_nxt_path = os.path.join(args.dataf, 'valid', str(infos[idx]), str(step + 1) + '.h5')
                data_nxt = normalize(load_data(data_names, data_nxt_path), stat)
                label = Variable(torch.FloatTensor(data_nxt[1][:n_particles]).cuda())
                # print(label)
                loss = np.sqrt(criterionMSE(vels, label).item())
                print(loss)

        vels = denormalize([vels.data.cpu().numpy()], [stat[1]])[0]

        if args.env == 'RiceGrip' or args.env == 'FluidShake':
            vels = np.concatenate([vels, v_nxt_gt[step, n_particles:]], 0)
        data[0] = data[0] + vels * args.dt

        if args.env == 'RiceGrip':
            # shifting the history
            # positions, restPositions
            data[1][:, args.position_dim:] = data[1][:, :-args.position_dim]
        data[1][:, :args.position_dim] = vels

        if args.debug:
            data[0] = p_gt[step + 1].copy()
            data[1][:, :args.position_dim] = v_nxt_gt[step]