Example #1
def train_epoch(dataset, sess, model, train_fetches, valid_fetches, train_writer, test_writer):
    global last_f1
    global lr
    starttime = datetime.datetime.now()
    batch_iter = dataset.batch_iter()
    for i, (X1_batch, X2_batch, y_batch) in tqdm(enumerate(batch_iter)):
        global_step = sess.run(model.global_step)
        if 0 == (global_step + 1) % FLAGS.valid_step:
            valid_cost, precision, recall, f1 = valid_epoch(dataset, sess, model)
            print('Global_step=%d: valid cost=%g; p=%g, r=%g, f1=%g' % (
                global_step, valid_cost, precision, recall, f1))
            print('cost time: %s' % (datetime.datetime.now() - starttime))
            if f1 > last_f1:
                last_f1 = f1
                saving_path = model.saver.save(sess, model_path+str(f1)+'_', global_step+1)
                print('saved new model to %s ' % saving_path)

        # training
        y_batch = to_categorical(y_batch)
        _batch_size = len(y_batch)
        feed_dict = {model.X1_inputs: X1_batch, model.X2_inputs: X2_batch, model.y_inputs: y_batch,
                     model.batch_size: _batch_size, model.tst: False, model.keep_prob: FLAGS.keep_prob}
        summary, _cost, _, _ = sess.run(train_fetches, feed_dict)  # the cost is the mean cost of one batch
        # every 500 steps: log a training summary and check one random validation batch
        if 0 == (global_step + 1) % 500:
            train_writer.add_summary(summary, global_step)
            batch_id = np.random.randint(0, n_va_batches)  # pick a random validation batch
            [X1_batch, X2_batch, y_batch] = dataset.get_vali_item(batch_id)
            y_batch = to_categorical(y_batch)
            _batch_size = len(y_batch)
            feed_dict = {model.X1_inputs: X1_batch, model.X2_inputs: X2_batch, model.y_inputs: y_batch,
                         model.batch_size: _batch_size, model.tst: True, model.keep_prob: 1.0}
            summary, _cost = sess.run(valid_fetches, feed_dict)
            test_writer.add_summary(summary, global_step)
            print('global_step:%d, loss:%f' % (global_step, _cost))
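Every example on this page calls some variant of `to_categorical` to turn integer class labels into one-hot vectors before feeding them to a model. For reference, a minimal NumPy sketch of that conversion (a hypothetical stand-in assuming a 1-D array of integer labels, not the exact helper each project imports):

import numpy as np

def to_categorical_sketch(y, num_classes=None):
    """One-hot encode a 1-D array of integer labels (illustrative only)."""
    y = np.asarray(y, dtype='int64')
    if num_classes is None:
        num_classes = int(y.max()) + 1
    return np.eye(num_classes, dtype='float32')[y]

print(to_categorical_sketch([0, 2, 1], num_classes=3))
# [[1. 0. 0.]
#  [0. 0. 1.]
#  [0. 1. 0.]]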
Example #2
def load_cifar(path):
    """Loads CIFAR10 dataset.

    # Returns
        Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.
    """

    num_train_samples = 50000

    x_train = np.empty((num_train_samples, 3, 32, 32), dtype='uint8')
    y_train = np.empty((num_train_samples, ), dtype='uint8')

    for i in range(1, 6):
        fpath = os.path.join(path, 'data_batch_' + str(i))
        (x_train[(i - 1) * 10000:i * 10000, :, :, :],
         y_train[(i - 1) * 10000:i * 10000]) = load_batch(fpath)

    fpath = os.path.join(path, 'test_batch')
    x_test, y_test = load_batch(fpath)

    y_train = np.reshape(y_train, (len(y_train), 1))
    y_test = np.reshape(y_test, (len(y_test), 1))
    # normalization
    # x_train = x_train.astype(np.float64) / 255. - 1.
    # x_test = x_test.astype(np.float64) / 255. - 1.
    mean = np.array([123.680, 116.779, 103.939])
    x_train = x_train.astype(np.float64) - mean[:, np.newaxis, np.newaxis]
    x_test = x_test.astype(np.float64) - mean[:, np.newaxis, np.newaxis]
    return (x_train, to_categorical(y_train)), (x_test, to_categorical(y_test))
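Per the docstring, the loader returns `(x_train, y_train), (x_test, y_test)`. A hypothetical call (the directory name is an assumption; it should contain the extracted `data_batch_1`..`data_batch_5` and `test_batch` files), with the shapes one would expect from a Keras-style `to_categorical`:

# Hypothetical usage; 'cifar-10-batches-py' is an assumed path to the extracted batches.
(x_train, y_train), (x_test, y_test) = load_cifar('cifar-10-batches-py')
print(x_train.shape, y_train.shape)  # expected: (50000, 3, 32, 32) (50000, 10)
print(x_test.shape, y_test.shape)    # expected: (10000, 3, 32, 32) (10000, 10)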
Example #3
def get_xy(
        image_list,
        label_names,
        batch_centers,
        size,
        fc_shape,
        nlabels,
        preload,
        datatype
):
    n_images = len(image_list)
    centers, idx = centers_and_idx(batch_centers, n_images)
    print(''.join([' '] * 15) + 'Loading x')
    x = filter(lambda z: z.any(), get_patches_list(image_list, centers, size, preload))
    x = np.concatenate(x)
    print(''.join([' '] * 15) + '- Concatenation')
    x[idx] = x
    print(''.join([' '] * 15) + 'Loading y')
    y = [np.array([l[c] for c in lc]) for l, lc in izip(labels_generator(label_names), centers)]
    print(''.join([' '] * 15) + '- Concatenation')
    y = np.concatenate(y)
    y[idx] = y
    y_fc = [np.asarray(get_patches(l, lc, fc_shape))
            for l, lc in izip(labels_generator(label_names), centers)]
    y_fc = np.concatenate(y_fc)
    y_fc[idx] = y_fc
    if nlabels <= 2:
        y = y.astype(dtype=np.bool)
        y_fc = y_fc.astype(dtype=np.bool)
    y = [
        to_categorical(y, num_classes=nlabels),
        to_categorical(y_fc, num_classes=nlabels).reshape((len(y_fc), -1, nlabels))
    ]
    return x.astype(dtype=datatype), y
Example #4
    def execute(self, inputs, targets, config_model=None, dataset_name=''):
        model = FactoryModel(
            config_model['name'],
            '{}_{}_holdout-{}'.format(dataset_name, config_model['name'],
                                      self.test_size), config_model['size'],
            config_model['params'], config_model['init']).get_model()

        if self.state.epochs > 0:
            model().load_weights(self.state.weights)
        else:
            # initialize weight and history paths for the model
            self.state.weights += model().name + '.h5'
            self.state.history += '{}/history_{}.csv'.format(
                config_model['name'],
                model().name)

        dict_scores = {}
        dict_scores['scores'] = {}
        dict_scores['scores']['model'] = [model().name]

        print('\n------[executing Hold out {} for {} model]------------------'.
              format(self.test_size * 100,
                     model().name))
        train_x, test_x, train_y, test_y = train_test_split(
            inputs,
            targets,
            test_size=self.test_size,
            random_state=0,
            shuffle=False)
        history = self.trainner.train_model(
            train_x,
            to_categorical(train_y),
            model(),
            validation_data=(test_x, to_categorical(test_y)),
            init_epoch=self.state.epochs)

        print(
            'Evaluating model -------------------------------------------------------------'
        )
        scores = model().evaluate(test_x, to_categorical(test_y))

        self.add_score(model().metrics_names, dict_scores['scores'], scores)

        (fpr, tpr, auc) = get_roc_curve(to_categorical(test_y),
                                        model().predict(test_x))
        dict_scores['roc'] = (fpr, tpr, auc)
        dict_scores['history'] = [history]
        dict_scores['cm'] = confusion_matrix(
            test_y, np.argmax(model().predict(test_x), axis=1))

        print("Result for the {} model".format(model().name))
        print(dict_scores['scores'])

        self.state.status = True

        return dict_scores
Example #5
def dnn_mnist():
    # load datasets
    path = 'mnist.pkl.gz'
    train_set, val_set, test_set = load_mnist_datasets(path)
    X_train, y_train = train_set
    X_val, y_val = val_set
    X_test, y_test = test_set

    # convert labels to one-hot vectors
    y_train, y_val, y_test = utils.to_categorical(
        y_train,
        10), utils.to_categorical(y_val, 10), utils.to_categorical(y_test, 10)

    # bookkeeping for the best model based on the validation set
    best_val_acc = -1
    mnist = Mnist()

    # Train
    batch_size = 32
    lr = 1e-1
    for epoch in range(10):
        num_train = X_train.shape[0]
        num_batch = num_train // batch_size
        for batch in range(num_batch):
            # get batch data
            batch_mask = np.random.choice(num_train, batch_size)
            X_batch = X_train[batch_mask]
            y_batch = y_train[batch_mask]
            # forward and backward pass
            mnist.forward(X_batch)
            loss = mnist.backward(X_batch, y_batch)
            if batch % 200 == 0:
                print("Epoch %2d Iter %3d Loss %.5f" % (epoch, batch, loss))

            # update parameters with the gradients
            for w in ["W1", "b1", "W2", "b2", "W3", "b3"]:
                mnist.weights[w] -= lr * mnist.gradients[w]

        train_acc = mnist.get_accuracy(X_train, y_train)
        val_acc = mnist.get_accuracy(X_val, y_val)

        if (best_val_acc < val_acc):
            best_val_acc = val_acc

        # keep track of the best model based on val_acc
        print('Epoch finished.')
        print('Train acc %.3f' % train_acc)
        print('Val acc %.3f' % val_acc)
        print('-' * 30)
        print('')

    print('Train finished. Best acc %.3f' % best_val_acc)
    test_acc = mnist.get_accuracy(X_test, y_test)
    print('Test acc %.3f' % test_acc)
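Each training iteration above draws a random mini-batch with `np.random.choice`, which samples indices with replacement by default; passing `replace=False` would give a batch of distinct indices. A quick illustration:

import numpy as np

num_train, batch_size = 1000, 32
batch_mask = np.random.choice(num_train, batch_size)                # may repeat indices
batch_mask_unique = np.random.choice(num_train, batch_size, replace=False)
print(batch_mask.shape, np.unique(batch_mask_unique).size)          # (32,) 32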
Example #6
    def __getitem__(self, index):
        """Return a (transformed) vrd_input and target sample from an integer index"""
        key, rel = self.data[index]
        subject_box = rel['subject']['bbox']  # [ymin, ymax, xmin, xmax]
        object_box = rel['object']['bbox']

        minbbox = [
            min(subject_box[0], object_box[0]),
            max(subject_box[1], object_box[1]),
            min(subject_box[2], object_box[2]),
            max(subject_box[3], object_box[3])
        ]

        image = imread('/scratch/datasets/sg_dataset/sg_' + self.type_dataset +
                       '_images/' + key)
        bboxes = [subject_box, object_box, minbbox]

        list_image = [
            image[bbox[0]:bbox[1], bbox[2]:bbox[3]] for bbox in bboxes
        ]

        subject_visual_input, object_visual_input, union_visual_input = tuple(
            transform(x) for x in list_image)

        list_binary_image = [np.zeros_like(image) for _ in range(len(bboxes))]
        for (binary_image, bbox) in zip(list_binary_image, bboxes):
            binary_image[bbox[0]:bbox[1], bbox[2]:bbox[3]] = 1

        subject_spatial_input, object_spatial_input, union_spatial_input = \
            tuple(spatial_transform(x)[0, :, :].view(1, 32, 32) for x in list_binary_image)

        predicate_spatial_feature = torch.cat(
            [subject_spatial_input, object_spatial_input], 0)

        object_word_feature = torch.FloatTensor(
            index_to_emb_dict[rel['object']['category']])
        subject_word_feature = torch.FloatTensor(
            index_to_emb_dict[rel['subject']['category']])

        if use_model == 1:
            input_sample = union_visual_input, predicate_spatial_feature
            target_sample = rel['subject']['category'], rel['object'][
                'category'], rel['predicate']
        elif use_model == 2:
            input_sample = torch.FloatTensor(to_categorical(rel['subject']['category'], object_size)), \
                           torch.FloatTensor(to_categorical(rel['object']['category'], object_size)), \
                           union_visual_input, predicate_spatial_feature
            target_sample = rel['predicate']
        elif use_model == 3:
            input_sample = (subject_word_feature, object_word_feature,
                            union_visual_input, predicate_spatial_feature)
            target_sample = rel['predicate']

        return input_sample, target_sample
Example #7
    def cv(hyper_params):
        print 'next:', hyper_params
        fold = 2

        perm = np.random.permutation(len(ids_list))
        chunk_size = len(perm) / fold
        chunks = np.split(
            perm, [chunk_size * (i + 1) for i, a in enumerate(range(fold))])

        acc_total = 0.
        model.set_params(**hyper_params)

        graph = tf.Graph()
        with graph.as_default():
            # define variables and ops in `graph`
            model.build_graph(constants.n_entity, constants.n_attr,
                              constants.word_vec_dim, constants.n_label)
            init_op = tf.global_variables_initializer()

        # actual cross validation
        for i in range(fold):
            test_indices = chunks[i]
            train_indices = np.array(list(set(perm) - set(test_indices)))

            ids_train, ids_val = ids_list[train_indices, :], ids_list[
                test_indices, :]
            ent_train, ent_val = ents[train_indices], ents[test_indices]
            attr_train, attr_val = attrs[train_indices], attrs[test_indices]
            pol_train, pol_val = pols[train_indices], pols[test_indices]
            lens_train, lens_val = sent_lens[train_indices], sent_lens[
                test_indices]

            with tf.Session(graph=graph).as_default() as sess:
                sess.run(init_op)
                # train
                model.fit(sess, ids_train, ent_train, attr_train, lens_train,
                          utils.to_categorical(pol_train), embedding_w,
                          ent_embedding, attr_embedding)

                # validate
                acc = model.eval(sess, ids_val, ent_val, attr_val, lens_val,
                                 utils.to_categorical(pol_val))
                utils.log('validation {}/{} acc: {}'.format(i + 1, fold, acc),
                          True)
                acc_total += acc

        utils.log(
            'cv for params below end. mean validation acc: {}'.format(
                acc_total / fold), True)
        utils.log(hyper_params)
        return acc_total / fold
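The permutation-and-chunk logic above is a hand-rolled k-fold split; scikit-learn's KFold yields equivalent train/test index pairs. A small sketch of that alternative (not what the original code uses):

import numpy as np
from sklearn.model_selection import KFold

indices = np.arange(10)
for fold_id, (train_idx, test_idx) in enumerate(KFold(n_splits=2, shuffle=True).split(indices)):
    print(fold_id, train_idx, test_idx)  # disjoint train/test index sets per fold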
Example #8
def main():
    data = pd.read_csv("blood.csv")
    x_train = data.drop(["a"], axis=1)
    y_train = data["a"]

    x_train = pd.get_dummies(x_train)
    target_map = dict()
    if y_train.dtype == "object":
        target_map = {val: i for (i, val) in enumerate(np.unique(y_train))}
        y_train = y_train.map(target_map)
        y_train = to_categorical(y_train)
    x_train = x_train.values

    x_train, x_test, y_train, y_test = train_test_split(x_train,
                                                        y_train,
                                                        test_size=0.2)

    mean_train = x_train.mean(axis=0)
    std_train = np.std(x_train, axis=0)

    x_train = (x_train - mean_train) / std_train
    x_test = (x_test - mean_train) / std_train

    mlp = MultiLayerPerceptron()
    mlp.add(Input(x_train.shape[1]))
    mlp.add(Dense(32, activation="relu"))
    mlp.add(Dense(2, activation="sigmoid"))
    mlp.build()
    mlp.fit(x_train,
            y_train,
            epoch=40,
            lr=0.05,
            validation_data=(x_test, y_test))
    mlp.draw()
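Note that `mean_train` and `std_train` are computed on the training split only and then reused to scale the test split, which avoids leaking test statistics into training. The same pattern with scikit-learn's StandardScaler (an equivalent alternative, not what the example uses):

import numpy as np
from sklearn.preprocessing import StandardScaler

x_tr, x_te = np.random.rand(100, 4), np.random.rand(20, 4)
scaler = StandardScaler().fit(x_tr)   # statistics come from the training split only
x_tr_scaled = scaler.transform(x_tr)
x_te_scaled = scaler.transform(x_te)  # test split scaled with the same statistics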
Example #9
def evaluate_audio_tagging(fold):
    """Evaluate the audio tagging predictions and write results.

    Args:
        fold (int): The fold (validation set) to evaluate.
    """
    import evaluation

    # Load ground truth data and predictions
    dataset = cfg.to_dataset('training', preprocessed=False)
    df = io.read_metadata(dataset.metadata_path)
    df = df[df.fold == fold]
    y_true = utils.to_categorical(df.label)
    fold_str = 'training' + str(fold)
    path = cfg.predictions_path.format('predictions', fold_str)
    y_pred = pd.read_csv(path, index_col=0).values

    # Mask out those that are not manually verified
    mask = df.manually_verified == 1
    y_pred = y_pred[mask]
    y_true = y_true[mask]

    # Evaluate audio tagging performance
    scores = evaluation.evaluate_audio_tagging(y_true,
                                               y_pred,
                                               threshold=cfg.threshold)

    # Ensure the output directory exists and write results
    os.makedirs(os.path.dirname(cfg.results_path), exist_ok=True)
    output_path = cfg.results_path.format(fold_str)
    scores.to_csv(output_path)

    # Print scores to 3 decimal places
    pd.options.display.float_format = '{:,.3f}'.format
    print('\n' + str(scores))
Example #10
def test(model, sess):
    reviews, ent2idx, attr2idx, polarity2idx = load_semeval_reviews(
        constants.test_filename)

    # list of (ids, ent, attr, pol)
    tuples = []
    for review in reviews:
        if len(review.tokens) <= 1:
            # ids = [0] + [word2idx[tok] for tok in review.tokens]
            ids = [word2idx[tok] for tok in review.tokens]
        else:
            ids = [word2idx[tok] for tok in review.tokens]
        tuples_ = [(ids, ent2idx[op.ent], attr2idx[op.attr],
                    polarity2idx[op.polarity]) for op in review.opinions]
        tuples.extend(tuples_)

    unzipped = zip(*tuples)
    ids = utils.pad_sequences(unzipped[0], maxlen=constants.max_sent_len)
    sent_lens = np.array(map(
        lambda x: len(x)
        if len(x) < constants.max_sent_len else constants.max_sent_len,
        unzipped[0]),
                         dtype='int32')
    ents, attrs, pols = (np.array(x, dtype='int32') for x in unzipped[1:])

    acc = model.eval(sess, ids, ents, attrs, sent_lens,
                     utils.to_categorical(pols))
    utils.log('test accuracy: {}'.format(acc), True)
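The `utils.pad_sequences` call above pads or truncates every token-id list to `max_sent_len`, while `sent_lens` keeps the clipped true lengths. Assuming the project's helper mirrors the Keras function of the same name (an assumption), the padding behaves like this:

from tensorflow.keras.preprocessing.sequence import pad_sequences

seqs = [[3, 7], [1, 2, 3, 4, 5, 6]]
print(pad_sequences(seqs, maxlen=4))
# [[0 0 3 7]
#  [3 4 5 6]]  <- short sequences are pre-padded, long ones truncated from the front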
Example #11
def main():
    data = datasets.load_digits()
    X = normalize(data.data)
    y = data.target

    # One-hot encoding of nominal y-values
    y = to_categorical(y)

    X_train, X_test, y_train, y_test = train_test_split(X,
                                                        y,
                                                        test_size=0.33,
                                                        seed=8)

    # Perceptron
    clf = Perceptron(n_iterations=5000,
                     learning_rate=0.001,
                     loss=CrossEntropy,
                     activation_function=Sigmoid)
    clf.fit(X_train, y_train)

    y_pred = np.argmax(clf.predict(X_test), axis=1)
    y_test = np.argmax(y_test, axis=1)

    accuracy = accuracy_score(y_test, y_pred)

    print("Accuracy:", accuracy)

    # Reduce dimension to two using PCA and plot the results
    Plot().plot_in_2d(X_test,
                      y_pred,
                      title="Perceptron",
                      accuracy=accuracy,
                      legend_labels=np.unique(y))
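The `np.argmax(..., axis=1)` calls above simply invert the one-hot encoding so accuracy can be computed on integer labels again. A tiny round-trip check (values are illustrative):

import numpy as np

labels = np.array([3, 1, 4, 1, 5])
one_hot = np.eye(6)[labels]             # one-hot encode (6 classes assumed)
recovered = np.argmax(one_hot, axis=1)  # decode back to integer labels
assert np.array_equal(recovered, labels)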
Example #12
 def transform_data(self, maxlen):
     self.maxlen_userUtter = maxlen
     # replace unknown words with <unk> in user utterance, and encode it
     # using word id.
     print '-------------------utter-----------------------'
     self.userUtter_encodePad, self.userUtter_txt = vectorizing_zeropad(
         self.userUtter_txt, self.maxlen_userUtter, self.word2id, prefix='')
     # replace unknown tags with <tag-unk> in user slot tags, and encode it
     # as 1hot matrix
     print '-------------------tag-----------------------'
     userTag_encodePad, self.userTag_txt = vectorizing_zeropad(
         self.userTag_txt,
         self.maxlen_userUtter,
         self.userTag2id,
         prefix='tag-')
     self.userTag_1hotPad = to_categorical(userTag_encodePad,
                                           self.userTag_vocab_size)
     # replace unknown intents with <intent-unk> in user intents, and encode
     # it as binary vec
     print '-------------------intent-----------------------'
     self.userIntent_vecBin, self.userIntent_txt = vectorizing_binaryVec(
         self.userIntent_txt,
         self.userIntent_vocab_size,
         self.userIntent2id,
         prefix='intent-')
Example #13
    def _preprocess_input_data(self, data_splits):
        train_data, validation_data, test_data = data_splits
        train_images, train_classes = train_data
        validation_images, validation_classes = validation_data
        test_images, test_classes = test_data

        # encode outputs into one hot vectors
        class_arg_splits = (train_classes, validation_classes, test_classes)
        categorical_data = [
            to_categorical(class_arg_split)
            for class_arg_split in class_arg_splits
        ]
        train_classes, validation_classes, test_classes = categorical_data

        # flatten, add salt/pepper noise and normalize images
        image_splits = (train_images, validation_images, test_images)
        image_splits = [flatten(image_split) for image_split in image_splits]
        image_splits = [
            spice_up_images(image_split) for image_split in image_splits
        ]
        image_splits = [
            normalize_images(image_split) for image_split in image_splits
        ]

        train_images, validation_images, test_images = image_splits
        train_data = (train_images, train_classes)
        validation_data = (validation_images, validation_classes)
        test_data = (test_images, test_classes)
        preprocessed_data_split = (train_data, validation_data, test_data)
        return preprocessed_data_split
Example #14
def main():
    data = pd.read_csv('blood.csv')
    x_train = data.drop(['a'], axis=1)
    y_train = data['a']

    x_train = pd.get_dummies(x_train)
    target_map = dict()
    if y_train.dtype == 'object':
        target_map = {val: i for (i, val) in enumerate(np.unique(y_train))}
        # print(target_map)
        y_train = y_train.map(target_map)
        y_train = to_categorical(y_train)
        # print(y_train[:5])
    x_train = x_train.values
    # x_train = (x_train - x_train.min(axis=0)) / (x_train.max(axis=0) - x_train.min(axis=0))
    # x_train = (x_train - x_train.mean(axis=0)) / np.std(x_train, axis=0)

    x_train, x_test, y_train, y_test = train_test_split(x_train, y_train, test_size=0.2)

    mean_train = x_train.mean(axis=0)
    std_train = np.std(x_train, axis=0) 

    x_train = (x_train - mean_train)/std_train
    x_test = (x_test - mean_train)/std_train



    mlp = MultiLayerPerceptron()
    mlp.add(Input(x_train.shape[1]))
    mlp.add(Dense(32, activation='relu'))
    mlp.add(Dense(2, activation='sigmoid'))
    mlp.build()
    mlp.fit(x_train, y_train, epoch=40, lr=0.05, validation_data=(x_test, y_test))
    mlp.draw()
Example #15
def valid_train_epoch(dataset, sess, model, train_sample):
    """Test on the train data."""
    batch_iter = train_sample.train_sample_iter()
    _costs = 0.0
    num = 0
    marked_labels_list = list()
    for i, (X1_batch, X2_batch, y_batch) in enumerate(batch_iter):
        num = num + 1
        marked_labels_list.extend(y_batch)
        y_batch = to_categorical(y_batch)
        _batch_size = len(y_batch)
        fetches = [model.loss]
        feed_dict = {
            model.X1_inputs: X1_batch,
            model.X2_inputs: X2_batch,
            model.y_inputs: y_batch,
            model.batch_size: _batch_size,
            model.is_training: False,
            model.keep_prob: 1.0
        }
        _cost = sess.run(fetches, feed_dict)
        _costs += _cost[0]

    mean_cost = _costs / num
    return mean_cost
Example #16
def take_percentage_of_type(trainX, trainY, relation_type, percentage):
    """take instances squentially
  assume input shuffled
  remove the rest of instances from the dataset"""
    if percentage >= 1.0:
        return trainX, trainY
    nb_classes = trainY.shape[1]
    trainY = np.argmax(trainY, axis=1)
    total_num = 0
    for i in xrange(len(trainY)):
        if trainY[i] == relation_type:
            total_num += 1
    current_num = 0
    remove_list, retain_list = [], []
    for i in xrange(len(trainY)):
        if trainY[i] == relation_type:
            current_num += 1
            if current_num > total_num * percentage:
                remove_list += [i]  # other
            else:
                retain_list += [i]
        else:
            retain_list += [i]
    trainY = utils.to_categorical(trainY, nb_classes=nb_classes)
    trainY = trainY[retain_list]
    for key in trainX.keys():
        trainX[key] = trainX[key][retain_list]
    return trainX, trainY
Example #17
    def __getitem__(self, index):
        sentence = self.collection.sentences[index]
        batch = self.build_batch(sentence)

        batch_input, batch_output = tuple(zip(*tuple(batch)))
        sents, starts_1, starts_2 = tuple(zip(*tuple(batch_input)))

        sents = torch.tensor(sents)
        starts_1 = to_categorical(torch.tensor(starts_1), sents.size()[-1])
        starts_1.unsqueeze_(-2)
        starts_2 = to_categorical(torch.tensor(starts_2), sents.size()[-1])
        starts_2.unsqueeze_(-2)

        batch_output = torch.tensor(batch_output)

        return (sents, starts_1, starts_2), batch_output
Example #18
def learn_toy(topology_file,
              features_file,
              orientation,
              num_hops,
              target_dims=None):
    if target_dims is None:
        target_dims = [-1]
    graph = labeled_graph.DenseLabeledGraph()
    graph.load_topology_from_text(topology_file)
    graph.load_features_from_text(features_file)
    graph.summary(verbose=1)
    x = utils.extract_inputs(graph.features, target_dims)
    y = utils.extract_targets(graph.features, target_dims)
    num_nodes = int(tf.shape(x)[0])
    num_features = tf.shape(x)[1]
    p = graph.transition_matrix
    y, num_classes = utils.to_categorical(tf.squeeze(y, axis=1))
    if orientation == 'reversed':
        num_hops *= 2
    x = dcnn.diffuse_features(orientation, p, x, num_hops)
    toy_dataset = tf.data.Dataset.from_tensor_slices((x, y)).repeat().batch(16)
    model = dcnn.DCNN(num_features, num_hops, num_classes, bias=True)
    loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
    model.compile(optimizer='adam', loss=loss_fn, metrics=['accuracy'])
    model.fit(toy_dataset, epochs=10, steps_per_epoch=768)
    model.evaluate(toy_dataset, steps=num_nodes)
Example #19
def gradient_Wj(dist, X, y, Wj):
    start = timeit.default_timer()
    # gradient = np.zeros_like(Wj)  # (26, 128)

    if y.ndim < 2:
        y = to_categorical(y, 26)

    x_index = np.arange(0, X.shape[0])
    y_deltas = (y - dist[x_index]).reshape((len(x_index), -1)).T
    x_deltas = X[x_index, 1:].reshape((len(x_index), -1))

    gradient = np.dot(y_deltas, x_deltas)
    #print(gradient.shape)

    # for j in range(X_train.shape[0]):
    #     y_delta = (y_train[j] - dist[j]).reshape((-1, 1))
    #     x_delta = X_train[j, 1:].reshape((1, -1))
    #     gradient += np.dot(y_delta, x_delta)

    gradient /= len(X)

    # for l2 regularization
    gradient += Wj
    #flattened_gradient = gradient.flatten()

    # result = open(r'grad.txt', 'w')
    # for g in flattened_gradient:
    #     result.write(str(g) + "\n")
    # result.close()

    stop = timeit.default_timer()
    #print('gradient_Wj (s): ' + str(stop - start))

    return gradient
Example #20
def valid_epoch(dataset, sess, model):
    """Test on the valid data."""
    batch_iter = dataset.batch_iter(False)
    _costs = 0.0
    predict_labels_list = list()  # all predicted labels
    marked_labels_list = list()
    for i, (X1_batch, X2_batch, y_batch) in tqdm(enumerate(batch_iter)):
        marked_labels_list.extend(y_batch)
        y_batch = to_categorical(y_batch)
        _batch_size = len(y_batch)
        fetches = [model.loss, model.y_pred]
        feed_dict = {
            model.X1_inputs: X1_batch,
            model.X2_inputs: X2_batch,
            model.y_inputs: y_batch,
            model.batch_size: _batch_size,
            model.tst: True,
            model.keep_prob: 1.0
        }
        _cost, predict_labels = sess.run(fetches, feed_dict)
        _costs += _cost
        predict_labels = map(labellist2id, predict_labels)
        predict_labels_list.extend(predict_labels)
    predict_label_and_marked_label_list = zip(predict_labels_list,
                                              marked_labels_list)
    precision, recall, f1 = score_eval(predict_label_and_marked_label_list)
    mean_cost = _costs / n_va_batches
    return mean_cost, precision, recall, f1
Example #21
def train(input_df, max_feature_length, num_classes, embedding_size, learning_rate, batch_size, num_epochs, save_dir=None, print_summary=False):
    # Stage 1: Convert raw texts into char-ids format && convert labels into one-hot vectors
    X_train, X_test, y_train, y_test = get_input_data(input_df)
    y_train = to_categorical(y_train, num_classes)

    # Stage 2: Build Model
    num_filters = [64, 128, 256, 512]

    model = build_model(num_filters=num_filters, num_classes=num_classes, embedding_size=embedding_size, learning_rate=learning_rate)

    # Stage 3: Training
    save_dir = save_dir if save_dir is not None else 'checkpoints'
    filepath = os.path.join(save_dir, "weights-{epoch:02d}-{val_acc:.2f}.hdf5")
    checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')

    if print_summary:
        print(model.summary())

    model.fit(
        x=X_train,
        y=y_train,
        batch_size=batch_size,
        epochs=num_epochs,
        validation_split=0.33,
        callbacks=[checkpoint],
        shuffle=True,
        verbose=True
    )
Example #22
def main():
    data = datasets.load_digits()
    X = normalize(data.data)
    y = data.target

    # convert the nominal y values to binary
    y = to_categorical(y)

    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4)

    #mlp
    clf = MultilayerPerceptron(n_hidden=16,
                               n_iterations=1000,
                               learning_rate=0.01)
    clf.fit(X_train, y_train)
    y_pred = np.argmax(clf.predict(X_test), axis=1)
    y_test = np.argmax(y_test, axis=1)

    accuracy = accuracy_score(y_test, y_pred)
    print("Accuracy:", accuracy)

    # Reduce dimension to two using PCA and plot the results
    Plot().plot_in_2d(X_test,
                      y_pred,
                      title="Multilayer Perceptron",
                      accuracy=accuracy,
                      legend_labels=np.unique(y))
Example #23
def do_eval(test_data_path, shuffle=False):
    if FLAGS.load_model is None:
        raise ValueError("You need to specify the model location by --load_model=[location]")

    # Load Testing Data
    question_1, question_2, labels = get_input_from_csv(test_data_path)

    if shuffle:
        question_1, question_2, labels = shuffle_data(question_1, question_2, labels)

    # Load Pre-trained Model
    if FLAGS.best_glove:
        import en_core_web_md
        nlp = en_core_web_md.load()  # load best-matching version for Glove
    else:
        nlp = spacy.load('en')
    embedding_matrix = load_glove_embeddings(nlp.vocab, n_unknown=FLAGS.num_unknown)  # shape=(1071074, 300)

    tf.logging.info('Build model ...')
    esim = ESIM(embedding_matrix, FLAGS.max_length, FLAGS.num_hidden, FLAGS.num_classes, FLAGS.keep_prob, FLAGS.learning_rate)

    if FLAGS.load_model:
        model = esim.build_model(FLAGS.load_model)
    else:
        raise ValueError("You need to specify the model location by --load_model=[location]")

    # Convert the "raw data" to word-ids format && convert "labels" to one-hot vectors
    q1_test, q2_test = convert_questions_to_word_ids(question_1, question_2, nlp, max_length=FLAGS.max_length, tree_truncate=FLAGS.tree_truncate)
    labels = to_categorical(np.asarray(labels, dtype='int32'))

    scores = model.evaluate([q1_test, q2_test], labels, batch_size=FLAGS.batch_size, verbose=1)

    print("=================== RESULTS =====================")
    print("[*] LOSS OF TEST DATA: %.4f" % scores[0])
    print("[*] ACCURACY OF TEST DATA: %.4f" % scores[1])
Example #24
def mel_collate(batch):
    """ Zero-pads model inputs and targets based on number of frames per step """
    len_out = int(hparams.freq * ceil(float(hparams.seq_len / hparams.freq)))

    mels = []
    labels = []
    labels_onehot = []
    for mel, speaker, sample_len, frame_len in batch:
        if frame_len < len_out:
            len_pad = len_out - frame_len
            x = np.pad(mel, ((0, len_pad), (0, 0)), 'constant')
        else:
            start = np.random.randint(frame_len - len_out + 1)
            x = mel[start:start + len_out]

        mels.append(x)

        label = to_categorical(speaker, hparams.speaker_num)
        labels.append(speaker)
        labels_onehot.append(label)

    mels = torch.FloatTensor(mels)
    labels = torch.LongTensor(labels)
    labels_onehot = torch.FloatTensor(labels_onehot)
    return mels, labels, labels_onehot
Example #25
def train(train_data, val_data, batch_size, n_epochs, save_dir=None):
    # Stage 1: Read training data (csv) && Preprocessing them
    tf.logging.info('Loading training and validation data ...')
    train_question_1, train_question_2, train_labels = get_input_from_csv(train_data)
    # val_question_1, val_question_2, val_labels = get_input_from_csv(val_data)

    # Stage 2: Load Pre-trained embedding matrix (Using GLOVE here)
    tf.logging.info('Loading pre-trained embedding matrix ...')
    if FLAGS.best_glove:
        import en_core_web_md
        nlp = en_core_web_md.load()  # load best-matching version for Glove
    else:
        nlp = spacy.load('en')
    embedding_matrix = load_glove_embeddings(nlp.vocab, n_unknown=FLAGS.num_unknown)  # shape=(1071074, 300)

    # Stage 3: Build Model
    tf.logging.info('Build model ...')
    esim = ESIM(embedding_matrix, FLAGS.max_length, FLAGS.num_hidden, FLAGS.num_classes, FLAGS.keep_prob, FLAGS.learning_rate)

    if FLAGS.load_model:
        model = esim.build_model(FLAGS.load_model)
    else:
        model = esim.build_model()

    # Stage 4: Convert the "raw data" to word-ids format && convert "labels" to one-hot vectors
    tf.logging.info('Converting questions into ids ...')
    q1_train, q2_train = convert_questions_to_word_ids(train_question_1, train_question_2, nlp, max_length=FLAGS.max_length, tree_truncate=FLAGS.tree_truncate)
    train_labels = to_categorical(np.asarray(train_labels, dtype='int32'))

    # q1_val, q2_val = convert_questions_to_word_ids(val_question_1, val_question_2, nlp, max_length=FLAGS.max_length, tree_truncate=FLAGS.tree_truncate)
    # val_labels = to_categorical(np.asarray(val_labels, dtype='int32'))

    # Stage 5: Training
    tf.logging.info('Start training ...')

    callbacks = []
    save_dir = save_dir if save_dir is not None else 'checkpoints'
    filepath = os.path.join(save_dir, "weights-{epoch:02d}-{val_acc:.2f}.hdf5")
    checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
    callbacks.append(checkpoint)

    if FLAGS.tensorboard:
        graph_dir = os.path.join('.', 'GRAPHs')
        if not os.path.exists(graph_dir):
            os.makedirs(graph_dir)
        tb = TensorBoard(log_dir=graph_dir, histogram_freq=0, write_graph=True, write_images=True)
        callbacks.append(tb)

    model.fit(
        x=[q1_train, q2_train],
        y=train_labels,
        batch_size=batch_size,
        epochs=n_epochs,
        # validation_data=([q1_val, q2_val], val_labels),
        validation_split=0.2,
        callbacks=callbacks,
        shuffle=True,
        verbose=FLAGS.verbose
    )
Example #26
def preprocess(data, params):
  # change list of dictionary to dictionary of lists
  data = dict(zip(data[0],zip(*[d.values() for d in data])))
  Y = data.pop('y')
  X = data
  X = pad_features(X, params)
  Y = to_categorical(Y, nb_classes=params['num_classes'])
  return X, Y
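The first line of `preprocess` turns a list of per-sample dictionaries into one dictionary of columns by transposing the values. A worked illustration of that step on toy data (assumes every dictionary has the same keys in the same insertion order):

rows = [{'x': 1, 'y': 0}, {'x': 2, 'y': 1}, {'x': 3, 'y': 0}]
cols = dict(zip(rows[0], zip(*[d.values() for d in rows])))
print(cols)  # {'x': (1, 2, 3), 'y': (0, 1, 0)}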
Example #27
	def evaluate(self, data_gen, training=False):

		preds_history = {}
		targets_history = {}
		scores = {}

		for X, Y in data_gen:
			
			if training: self.fit_on_batch(X, Y)

			self.train(training)
			Y_pred = self(X)

			for target in Y.keys():
				if Y[target] is None: continue
				if target not in self.targets: continue
				target_idx = self.targets.index(target)

				if target not in preds_history:
					preds_history[target] = []
					targets_history[target] = []

				ypred = Y_pred[target_idx][0].cpu().data.numpy()

				if target != 'regression':
					ytarget = to_categorical([Y[target]], num_classes=self.classes[target_idx])[0]
				else:
					ytarget = Y[target]
				preds_history[target].append(ypred)
				targets_history[target].append(ytarget)
				
		for target in preds_history.keys(): 
			preds_history[target] = np.array(preds_history[target])
			targets_history[target] = np.array(targets_history[target])

			if target != 'regression':
				mask = targets_history[target].sum(axis=0) > 2
				if mask.sum() < 2: continue
				print (target, targets_history[target].shape[0])

			score, baseline = 0.0, 0.0
			if target == 'regression':
				#score = pearsonr (targets_history[target], preds_history[target][:, 0])
				#baseline = pearsonr (targets_history[target], shuffle(preds_history[target][:, 0]))
				#scores[target] = "{0:0.3f}(p={1:0.4f})/{2:0.3f}(p={3:0.4f})".format(score[0], score[1], baseline[0], baseline[1])
				score = concordance_index(targets_history[target], preds_history[target][:, 0],
					event_observed=targets_history[target]<1.0)
				baseline = concordance_index(targets_history[target], shuffle(preds_history[target][:, 0]),
					event_observed=targets_history[target]<1.0)
				scores[target] = "{0:0.3f}/{1:0.3f}".format(score, baseline)

			else:
				score = roc_auc_score (targets_history[target][:, mask], preds_history[target][:, mask])
				baseline = roc_auc_score (targets_history[target][:, mask], shuffle(preds_history[target][:, mask]))
				scores[target] = "{0:0.3f}/{1:0.3f}".format(score, baseline)

		return scores
Example #28
 def fit(self, X, y):
     y = to_categorical(y)
     y_pred = np.zeros(np.shape(y))
     for i in self.bar(range(self.n_estimators)):
         tree = self.trees[i]
         y_and_pred = np.concatenate((y, y_pred), axis=1)
         tree.fit(X, y_and_pred)
         update_pred = tree.predict(X)
         y_pred -= np.multiply(self.learning_rate, update_pred)
Example #29
def do_evaluation(eval_data, max_feature_length):
    if FLAGS.load_model is None:
        raise ValueError(
            "You need to specify the model location by --load_model=[location]"
        )

    # Load Testing Data
    comments = []
    sentiments = []
    contents = []

    with open(eval_data, 'r') as f:
        for line in f.readlines():
            comment, sentiment, content = line.split(',')
            comments.append(comment)
            sentiments.append(sentiment)
            contents.append(content)

    for i in xrange(len(comments)):
        X, y_sentiment, y_comment = get_input_data_from_text(
            comments[i], sentiments[i], contents[i], max_feature_length)
        y_sentiment = to_categorical(y_sentiment, FLAGS.n_sentiment_classes)
        y_comment = to_categorical(y_comment, FLAGS.n_content_classes)

        sentiment_model = build_model(
            num_filters=num_filters,
            num_classes=FLAGS.n_sentiment_classes,
            sequence_max_length=FLAGS.max_feature_length,
            embedding_size=FLAGS.embedding_size,
            learning_rate=FLAGS.learning_rate,
            load_pretrained_model=True)

        comment_model = build_model(
            num_filters=num_filters,
            num_classes=FLAGS.n_content_classes,
            sequence_max_length=FLAGS.max_feature_length,
            embedding_size=FLAGS.embedding_size,
            learning_rate=FLAGS.learning_rate,
            load_pretrained_model=True)

        sentiment_loss_and_history = sentiment_model.evaluate(
            X, y_sentiment, batch_size=FLAGS.batch_size, verbose=1)
        comment_loss_and_history = comment_model.evaluate(
            X, y_comment, batch_size=FLAGS.batch_size, verbose=1)
Example #30
def train(input_dim, z_dim, num_epochs, num_classes, batch_size, learning_rate, shuffle=False, data_dir=None, summary_dir=None):

    # Load data
    X_train = np.load(os.path.join(data_dir, 'data.npy'))
    y_train = np.load(os.path.join(data_dir, 'label.npy'))
    y_train = to_categorical(y_train, num_classes + 1)
    print('Number of training images: %d' % X_train.shape[0])

    with tf.Graph().as_default():
        sess = tf.Session()

        with sess.as_default():
            # Build model
            aae = AAE(input_dim, z_dim, num_classes, batch_size, learning_rate)
            aae.build(G_type=FLAGS.G_type)

            loss_summary_writer = tf.summary.FileWriter(summary_dir, sess.graph)

            num_batches_per_epoch = X_train.shape[0] // batch_size

            tf.logging.info('Create new session')
            sess.run(tf.global_variables_initializer())

            for epoch in range(num_epochs):
                total_vae_loss, total_gen_loss, total_disc_loss = 0.0, 0.0, 0.0

                for i in range(num_batches_per_epoch):
                    start_index = i * batch_size
                    end_index = min((i + 1) * batch_size, X_train.shape[0])

                    X_batch = X_train[start_index:end_index]
                    y_batch = y_train[start_index:end_index]

                    vae_loss, gen_loss, disc_loss = train_step(X_batch, y_batch, sess, aae, loss_summary_writer)

                    total_vae_loss += vae_loss
                    total_gen_loss += gen_loss
                    total_disc_loss += disc_loss

                print("Epoch %3d ==> vae_loss: %.4f\tgen_loss: %.4f\tdisc_loss: %.4f" % (epoch, total_vae_loss / num_batches_per_epoch, total_gen_loss / num_batches_per_epoch, total_disc_loss / num_batches_per_epoch))

            if FLAGS.plot:
                indices = np.random.choice(X_train.shape[0], size=batch_size)
                X_sample = X_train[indices]
                y_sample = y_train[indices]
                plot_reconstructed_images(X_sample, y_sample, aae, sess)

                plot_generated_images(aae, sess)

                indices = np.random.choice(X_train.shape[0], size=5000)
                X_sample = X_train[indices]
                y_sample = y_train[indices]

                X_latent_space = aae.get_latent_space(sess, X_sample)
                X_latent_space = X_latent_space.astype('float64')
                plot_tsne(X_latent_space, y_sample)
Example #31
    def flow(self, mode='train'):
            while True:
                if mode == 'train':
                    shuffle(self.train_keys)
                    keys = self.train_keys
                elif mode == 'val' or mode == 'demo':
                    shuffle(self.validation_keys)
                    keys = self.validation_keys
                else:
                    raise Exception('invalid mode: %s' % mode)

                inputs = []
                targets = []
                for key in keys:
                    image_path = self.path_prefix + key
                    image_array = imread(image_path)
                    image_array = imresize(image_array, self.image_size)

                    num_image_channels = len(image_array.shape)
                    if num_image_channels != 3:
                        continue

                    ground_truth = self.ground_truth_data[key]

                    if self.do_random_crop:
                        image_array = self._do_random_crop(image_array)

                    image_array = image_array.astype('float32')
                    if mode == 'train' or mode == 'demo':
                        if self.ground_truth_transformer is not None:
                            image_array, ground_truth = self.transform(
                                                                image_array,
                                                                ground_truth)
                            ground_truth = (
                                self.ground_truth_transformer.assign_boxes(
                                                            ground_truth))
                        else:
                            image_array = self.transform(image_array)[0]

                    inputs.append(image_array)
                    targets.append(ground_truth)
                    if len(targets) == self.batch_size:
                        inputs = np.asarray(inputs)
                        targets = np.asarray(targets)
                        # this will not work for boxes
                        targets = to_categorical(targets)
                        if mode == 'train' or mode == 'val':
                            inputs = self.preprocess_images(inputs)
                            yield self._wrap_in_dictionary(inputs, targets)
                        if mode == 'demo':
                            yield self._wrap_in_dictionary(inputs, targets)
                        inputs = []
                        targets = []
Example #32
 def fit(self, X, y):
     y = to_categorical(y)
     super(GBDTClassifier, self).fit(X, y)