Example #1
def get_masks(data):
    """
    Generate train, validation, and test masks and store them on the given data object.

    :param - data: torch_geometric Data object holding node features, edges, and labels
    """
    num_nodes = len(data.y)
    nids = utils.shuffle_ids(list(range(num_nodes)))

    # It is CRITICAL that test split specified below remains in unison with the
    # test split specified for the SVM models in svm.py in order to enable
    # valid comparison of prediction results across the SVM and GNN models.
    val = int(num_nodes * .7)
    test = int(num_nodes * .8)

    train_ids = nids[:val]
    val_ids = nids[val:test]
    test_ids = nids[test:]

    data.train_mask = torch.tensor(
        [1 if i in train_ids else 0 for i in range(num_nodes)],
        dtype=torch.bool)
    data.val_mask = torch.tensor(
        [1 if i in val_ids else 0 for i in range(num_nodes)], dtype=torch.bool)
    data.test_mask = torch.tensor(
        [1 if i in test_ids else 0 for i in range(num_nodes)],
        dtype=torch.bool)
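A minimal usage sketch for get_masks, assuming utils.shuffle_ids from the repository above returns a shuffled copy of the id list; the toy Data object below is illustrative only:

import torch
from torch_geometric.data import Data

# Toy graph: 10 nodes with 8 features each, a few edges, integer labels in {0, 1, 2}.
x = torch.randn(10, 8)
edge_index = torch.tensor([[0, 1, 2, 3], [1, 2, 3, 4]], dtype=torch.long)
y = torch.randint(0, 3, (10,))

data = Data(x=x, edge_index=edge_index, y=y)
get_masks(data)  # attaches boolean train_mask / val_mask / test_mask (70/10/20 split)
print(data.train_mask.sum(), data.val_mask.sum(), data.test_mask.sum())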
Example #2
def evaluate(model, data, params):
    """Evaluate on the validation split and print log-loss, F1, a classification report, and a confusion matrix."""
    # only for string labels
    le = LabelEncoder().fit(data['y']['tg'][:])
    lb = LabelBinarizer().fit(data['y']['tg'][:])
    id_hash = {v: k for k, v in enumerate(data['ids'][:])}
    val_ids = sorted(shuffle_ids(params['split']['valid'], id_hash))

    Ypr = []
    for i in trange(0, len(val_ids), params['batch_sz'], ncols=80):
        X = data['X'][val_ids[i:i + params['batch_sz']]]
        M = data['mask'][val_ids[i:i + params['batch_sz']]]
        pred = []
        for j in range(0, X.shape[-2], params['dur'] // 2):
            x = X[:, :, j:j + params['dur']]
            if x.shape[-2] >= params['dur']:
                pred.append(model['tg']['predict'](x))
        Ypr.append(np.array(pred).mean(axis=0))
    Ypr = np.concatenate(Ypr, axis=0)
    Y = lb.transform(data['y']['tg'][val_ids])

    y, ypr = np.argmax(Y, axis=1), np.argmax(Ypr, axis=1)
    y_label = le.inverse_transform(y)
    ypr_label = le.inverse_transform(ypr)

    f1 = f1_score(y, ypr, average='macro')
    ll = log_loss(Y, Ypr)
    print()
    print()
    print('LogLoss: {:.4f}'.format(ll))
    print('F1: {:.4f}'.format(f1))
    print(classification_report(y_label, ypr_label))
    print(confusion_matrix(y, ypr))

    return f1, ll
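The half-overlapping window averaging inside evaluate can be isolated into a small self-contained sketch; the 4-D (batch, channel, time, freq) layout used for the placeholder array below is an assumption about data['X'], not something stated in the example:

import numpy as np

# Placeholder batch shaped (batch, channel, time, freq); values are random.
X = np.random.rand(4, 2, 100, 128)
dur = 40  # stands in for params['dur']

windows = []
for j in range(0, X.shape[-2], dur // 2):  # hop by half a window length
    x = X[:, :, j:j + dur]                 # slice along the time axis
    if x.shape[-2] >= dur:                 # drop the short tail window
        windows.append(x)

# Per-window model outputs would then be averaged, as evaluate() does above.
print(len(windows), windows[0].shape)      # 4 windows of shape (4, 2, 40, 128)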
Example #3
def load_splits(embeddings_path):
    """
    Generate train/test splits for the butterfly embeddings. Embeddings 
    need to be generated prior by running node2vec as specified here: 
    https://github.com/koenig125/224W-final-project.

    return: 4-tuple holding the training data, training labels, testing
    data, and testing labels for the BIOSNAP butterfly similarity network.
    """
    labels = load_labels()
    embeddings = utils.load_embeddings(embeddings_path)
    num_nodes = len(labels)

    nids = utils.shuffle_ids(list(range(num_nodes)))
    X = np.array([embeddings[n] for n in nids])
    y = np.array([labels[n] for n in nids])

    # It is CRITICAL that test split specified below remains in unison with the
    # test split specified for the GNN models in train.py in order to enable
    # valid comparison of prediction results across the SVM and GNN models.
    test = int(num_nodes * .8)
    X_train, y_train = X[:test], y[:test]
    X_test, y_test = X[test:], y[test:]
    return X_train, X_test, y_train, y_test
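The comment above refers to the SVM models in svm.py, which is not shown here, so the fit/score call below is only an illustrative sketch using scikit-learn's SVC with default settings and a placeholder embeddings path:

from sklearn.svm import SVC

X_train, X_test, y_train, y_test = load_splits('embeddings.emb')  # placeholder path
clf = SVC()  # the actual svm.py configuration may differ
clf.fit(X_train, y_train)
print('test accuracy:', clf.score(X_test, y_test))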
Example #4
def train(model, data, params, shuffle=True, tblogger=None):
    """Run the training loop, periodically logging train/validation cost and accuracy (optionally via tblogger)."""
    # actual training
    id_hash = {v: k for k, v in enumerate(data['ids'][:])}

    # only for string labels
    lb = LabelBinarizer().fit(data['y']['tg'][:])

    # params['iter'] = 0
    try:
        if params['verbose']:
            epoch = trange(params['n_epochs'],
                           desc='[Loss : -.--] Epoch',
                           ncols=80)
        else:
            epoch = range(params['n_epochs'])

        for n in epoch:
            if shuffle:
                trn_ids = shuffle_ids(params['split']['train'], id_hash)
                val_ids = shuffle_ids(params['split']['valid'], id_hash)
            else:
                trn_ids = [
                    id_hash[x] for x in params['split']['train']
                    if x in id_hash
                ]
                val_ids = [
                    id_hash[x] for x in params['split']['valid']
                    if x in id_hash
                ]

            for i, X_, y_, target in prepare_batch(data, trn_ids, params, lb):

                if params['iter'] % params['report_every'] == 0:
                    # draw validation samples
                    idx_v = sorted(
                        np.random.choice(val_ids,
                                         params['batch_sz'],
                                         replace=False))
                    if target == 'tg':
                        y_v = lb.transform(data['y'][target][idx_v])
                    else:
                        y_v = data['y'][target][idx_v]
                    X_v, y_v = random_crop(data['X'][idx_v],
                                           data['mask'][idx_v], y_v,
                                           params['dur'])

                    c = model[target]['cost'](X_, y_).item()
                    cv = model[target]['cost'](X_v, y_v).item()
                    a = model[target]['acc'](X_, y_).item()
                    av = model[target]['acc'](X_v, y_v).item()

                    if tblogger is not None:
                        tblogger.log_value('%s_cost_tr' % target, c,
                                           params['iter'])
                        tblogger.log_value('%s_cost_vl' % target, cv,
                                           params['iter'])
                        tblogger.log_value('%s_acc_tr' % target, a,
                                           params['iter'])
                        tblogger.log_value('%s_acc_vl' % target, av,
                                           params['iter'])

                    if params['verbose']:
                        epoch.set_description(
                            '[v_loss : {:.4f} / v_acc: {:.4f}]Epoch'.format(
                                cv, av))

                model[target]['train'](X_, y_)
                params['iter'] += 1

    except KeyboardInterrupt:
        print('User Stopped!')
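Every key that train() and evaluate() read from params is collected below into one hypothetical dict; the values and the track-id naming are placeholders, not taken from the original code:

# Placeholder ids standing in for whatever data['ids'] actually holds.
train_track_ids = ['trk_%04d' % i for i in range(800)]
valid_track_ids = ['trk_%04d' % i for i in range(800, 1000)]

params = {
    'split': {'train': train_track_ids, 'valid': valid_track_ids},
    'batch_sz': 32,        # samples drawn per batch
    'dur': 40,             # window length along the time axis
    'n_epochs': 100,
    'report_every': 50,    # iterations between validation reports
    'verbose': True,
    'iter': 0,             # running iteration counter incremented in train()
}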
Example #5
def train_2_layer_convnet(data, params):
    train, val, test = data
    input_shape = (3, 32, 32)
    output_classes = 10
    conv_layer = Conv2d(input_shape,
                        num_filters=params.num_Filters,
                        kernel_size=params.kernel_size,
                        stride=1,
                        padding=params.pad_size)
    relu = ReLU()
    conv_out_shape = (params.num_Filters, conv_layer.conv_out_ht,
                      conv_layer.conv_out_wd)
    pool_layer = Pooling(conv_out_shape, params.pool_size)
    fully_connected_input_size = params.num_Filters * pool_layer.height_out * pool_layer.width_out
    full_connected_layer = FullyConnectedLayer(fully_connected_input_size, 100)
    relu_final = ReLU()
    final_fcc_layer = FullyConnectedLayer(100, output_classes)
    sfmax_layer = SoftmaxCrossEntropy()

    conv_net_layers = (conv_layer, relu, pool_layer, full_connected_layer,
                       relu_final, final_fcc_layer, sfmax_layer)

    for e in range(params.epochs):
        # train
        train_loss = 0
        ids = shuffle_ids(train['data'])
        train_data = train['data'][ids]
        train_labels = train['labels'][ids]
        data_points = len(train['data'])
        # data_points = 50

        for b in range(0, data_points, params.bsz):
            sfmax, loss_mat = forward_pass_2_layer_convnet(
                conv_net_layers, train_data[b:b + params.bsz, :],
                train_labels[b:b + params.bsz, :])
            loss = np.sum(loss_mat)
            train_loss += loss
            weight_updates = backward_pass_2_layer_convnet(conv_net_layers)
            update_pass_2_layer_convnet(conv_net_layers, weight_updates,
                                        params)
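        # after the epoch, recompute loss and error over the full training set for reporting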
        sfmax, loss_mat = forward_pass_2_layer_convnet(
            conv_net_layers, train['data'][0:data_points, :],
            train['labels'][0:data_points, :])
        loss = np.sum(loss_mat)
        train_loss = loss
        y_hat = np.argmax(sfmax, axis=1)
        # print(y_hat, trainy_num)
        train_error = np.sum([
            y_hat[i] != train['numbers'][i]
            for i in range(0, len(train['numbers']))
        ]) / len(train['numbers'])

        sfmax, val_loss = forward_pass_2_layer_convnet(conv_net_layers,
                                                       val['data'],
                                                       val['labels'])
        y_hat = np.argmax(sfmax, axis=1)
        val_error = np.sum([
            y_hat[i] != val['numbers'][i]
            for i in range(0, len(val['numbers']))
        ]) / len(val['numbers'])

        sfmax, test_loss = forward_pass_2_layer_convnet(
            conv_net_layers, test['data'], test['labels'])
        y_hat = np.argmax(sfmax, axis=1)
        test_error = np.sum([
            y_hat[i] != test['numbers'][i]
            for i in range(0, len(test['numbers']))
        ]) / len(test['numbers'])

        print(
            "Epoch:{} Train Loss:{} Train Error:{} Val Error:{} Test Err: {}".
            format(e, train_loss, train_error, val_error, test_error))
Example #6
def train_convnet(data, params):
    train, val, test = data
    input_shape = (3, 32, 32)
    output_classes = 10
    conv_layer = Conv2d(input_shape,
                        num_filters=params.num_Filters,
                        kernel_size=params.kernel_size,
                        stride=1,
                        padding=params.pad_size)
    relu = ReLU()
    conv_out_shape = (params.num_Filters, conv_layer.conv_out_ht,
                      conv_layer.conv_out_wd)
    pool_layer = Pooling(conv_out_shape, params.pool_size)
    fully_connected_input_size = params.num_Filters * pool_layer.height_out * pool_layer.width_out
    full_connected_layer = FullyConnectedLayer(fully_connected_input_size,
                                               output_classes)
    sfmax_layer = SoftmaxCrossEntropy()
    conv_net_layers = (conv_layer, relu, pool_layer, full_connected_layer,
                       sfmax_layer)

    for e in range(params.epochs):
        # train
        train_loss = 0
        ids = shuffle_ids(train['data'])
        train_data = train['data'][ids]
        train_labels = train['labels'][ids]
        data_points = len(train['data'])
        # data_points = 50
        grad_check = False
        num_grad = 0.0
        for b in range(0, data_points, params.bsz):
            if grad_check:
                eps = 1e-10
                # W = full_connected_layer.W
                W = conv_layer.W[0, 0]
                W[0, 2] -= eps
                _, loss_mat = forward_pass_convnet(
                    conv_net_layers, train_data[b:b + params.bsz, :],
                    train_labels[b:b + params.bsz, :])
                loss1 = np.sum(loss_mat)
                W[0, 2] += 2 * eps
                _, loss_mat = forward_pass_convnet(
                    conv_net_layers, train_data[b:b + params.bsz, :],
                    train_labels[b:b + params.bsz, :])
                loss2 = np.sum(loss_mat)
                W[0, 2] -= eps
                num_grad = (loss2 - loss1) / (2 * eps)
            sfmax, loss_mat = forward_pass_convnet(
                conv_net_layers, train_data[b:b + params.bsz, :],
                train_labels[b:b + params.bsz, :])
            loss = np.sum(loss_mat)
            train_loss += loss
            weight_updates = backward_pass_convnet(conv_net_layers)
            if grad_check:
                dW, db, prev_dW, prev_db = weight_updates[0]
                print("Num Grad:{} Backward Grad:{}".format(
                    num_grad, dW[0, 0, 0, 2]))
            update_pass_convnet(conv_net_layers, weight_updates, params)
        # after the epoch, recompute loss and error over the full training set for reporting
        sfmax, loss_mat = forward_pass_convnet(conv_net_layers, train['data'],
                                               train['labels'])
        train_loss = np.sum(loss_mat)
        y_hat = np.argmax(sfmax, axis=1)
        # print(y_hat, trainy_num)
        train_error = np.sum([
            y_hat[i] != train['numbers'][i]
            for i in range(0, len(train['numbers']))
        ]) / len(train['numbers'])

        sfmax, val_loss = forward_pass_convnet(conv_net_layers, val['data'],
                                               val['labels'])
        y_hat = np.argmax(sfmax, axis=1)
        val_error = np.sum([
            y_hat[i] != val['numbers'][i]
            for i in range(0, len(val['numbers']))
        ]) / len(val['numbers'])

        sfmax, test_loss = forward_pass_convnet(conv_net_layers, test['data'],
                                                test['labels'])
        y_hat = np.argmax(sfmax, axis=1)
        test_error = np.sum([
            y_hat[i] != test['numbers'][i]
            for i in range(0, len(test['numbers']))
        ]) / len(test['numbers'])

        print(
            "Epoch:{} Train Loss:{} Train Error:{} Val Error:{} Test Err: {}".
            format(e, train_loss, train_error, val_error, test_error))
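Both convnet trainers above read the same attribute-style params object and the same (train, val, test) dict layout; the sketch below reconstructs that interface with placeholder shapes and values, since the real configuration is not part of the example:

from types import SimpleNamespace
import numpy as np

# Hypothetical hyper-parameters covering every attribute the trainers access.
params = SimpleNamespace(num_Filters=8, kernel_size=3, pad_size=1,
                         pool_size=2, bsz=32, epochs=10)

def make_split(n):
    # Random CIFAR-shaped images, one-hot labels, and integer class ids.
    numbers = np.random.randint(0, 10, n)
    return {'data': np.random.rand(n, 3, 32, 32),
            'labels': np.eye(10)[numbers],
            'numbers': numbers}

data = (make_split(500), make_split(100), make_split(100))
train_convnet(data, params)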