Example #1
    def fit(self, X_train, y_train, X_test, y_test):
        self.input_dim = X_train.shape[1]
        X_train = self.convertToArray(X_train)
        X_test = self.convertToArray(X_test)
        y_train = self.convertToArray(y_train)
        y_test = self.convertToArray(y_test)
        print("Start training. Configurations count: %i" % (len(self.param_grid)))
        i = 1
        best_score = 0
        self.check_keys(['hLayers','activation','optimizer'])
        for curParams in self.param_grid:
            if ('hLayers' not in curParams.keys()):
                raise Exception("'hLayers' must be one of the grid parameters")
            else:
                hLayers = curParams.get('hLayers')
                
            if ('activation' not in curParams.keys()):
                activation = self.default_activation
            else:
                activation = curParams.get('activation')
                
            if ('optimizer' not in curParams.keys()):
                optimizer = self.default_optimizer
            else:
                optimizer = curParams.get('optimizer')
                
            filePath = self.log_files_path + self.getFileName(hLayers, activation, optimizer)
            dnn = self.createDNN(hLayers, activation, optimizer)
            lhdnn, chkp, es = self.createCallBacks(X_train, y_train, X_test, y_test, filePath)
            dnn.fit(X_train, y_train, nb_epoch=self.epoch_count, batch_size=self.batch_size,
                   validation_data = [X_test, y_test], verbose=2, callbacks=[lhdnn, chkp, es])
            if self.score_func == 'accuracy':

                true_test = np_utils.probas_to_classes(y_test)
                pred_test = np_utils.probas_to_classes(dnn.predict(X_test))
                cur_score = accuracy_score(true_test, pred_test)
            
                if (cur_score > best_score):
                    best_score = cur_score
                    best_dnn = dnn
                self.writeInfoToFile(lhdnn, filePath)
                
            elif self.score_func == 'r2_score':
                cur_score = r2_score(y_test, dnn.predict(X_test))
            
                if (cur_score > best_score):
                    best_score = cur_score
                    best_dnn = dnn

                self.writeInfoToFile(lhdnn, filePath)

            print("Configuration #%i. Completed." % (i))
            i += 1
        # return the best-scoring model found during the search, not just the last one trained
        return best_dnn
Example #2
    def test(self, data):
        y_predicted = np_utils.probas_to_classes(self.model.predict(data.x))
        y_actual = np_utils.probas_to_classes(data.y)
        error = (np.ravel(y_predicted) != np.ravel(y_actual)).sum().astype(float)/y_actual.shape[0]

        print("PREDICTED: class 0: {0}, class 1: {1}, class 2: {2}".format(
              np.sum(np.ravel(y_predicted) == 0),
              np.sum(np.ravel(y_predicted) == 1),
              np.sum(np.ravel(y_predicted) == 2)))
        print("ACTUAL: class 0: {0}, class 1: {1}, class 2: {2}".format(
              np.sum(np.ravel(y_actual) == 0),
              np.sum(np.ravel(y_actual) == 1),
              np.sum(np.ravel(y_actual) == 2)))
        print("ERROR RATE: ", error)

        return error
Example #3
def train(label_set='full', drop_unk=False,
          word_vecs=None, setup_only=False, layer_sizes=[512,256],
          pool_mode='sum'):
    print "Loading data..."
    df = sentences_df(SENTENCES_CSV, labels=label_set, drop_unk=drop_unk)
    X, y, word2idx, l_enc = load_dataset(df, pad=True)
    print "X shape:", X.shape
    y_orig = y
    y_binary = to_categorical(y)
    labels = np.unique(y_orig)
    nb_labels = labels.shape[0]
    if drop_unk:
        label_set_str = label_set + ' (-unk)'
    else:
        label_set_str = label_set
    print "Number of labels: %i [%s]" % (nb_labels, label_set_str)
    if nb_labels > 2:
        y = y_binary
    maxlen = X.shape[1]
    vocab_size = len(word2idx) + 1 # 0 masking
    if pretrained_embeddings is True:
        word_vectors = load_bin_vec(word_vecs, word2idx)
        add_unknown_words(word_vectors, word2idx)
        embedding_weights = np.zeros((vocab_size+1, emb_dim))
        for word, index in word2idx.items():
            embedding_weights[index,:] = word_vectors[word]
    else:
        embedding_weights = None
    print "Data loaded."

    skf = StratifiedKFold(y_orig, n_folds=10, shuffle=True, random_state=0)
    cv_scores = []
    for i, (train, test) in enumerate(skf):
        start_time = time.time()
        nn = None
        nn = EnsembleNN(vocab_size, nb_labels, emb_dim, maxlen,
                        embedding_weights, filter_hs, nb_filters,
                        dropout_p, trainable_embeddings, pretrained_embeddings,
                        layer_sizes, pool_mode)
        if i == 0:
            print_summary(nn.model.layers)
        acc = train_and_test_model(nn, X[train], y[train], X[test], y[test],
                                   batch_size, nb_epoch,
                                   lr, beta_1, beta_2, epsilon)
        cv_scores.append(acc)
        train_time = time.time() - start_time
        print('\nLabel frequencies in y[test]')
        print_label_frequencies((y_orig[test], l_enc))
        y_pred = nn.model.predict(X[test])
        y_pred = probas_to_classes(y_pred)
        c = Counter(y_pred)
        total = float(len(y_pred))
        print('\nLabel frequencies in predict(y[test])')
        for label, count in c.most_common():
            print l_enc.inverse_transform(label), count, count / total
        print "fold %i/10 - time: %.2f s - acc: %.4f on %i samples" % \
            (i+1, train_time, acc, len(test))
    print "Avg cv accuracy: %.4f" % np.mean(cv_scores)
Example #4
 def predict(self, X):
     if not isinstance(X,np.ndarray):
         X = X.values
     ypred = self.model.predict(X, batch_size=self.batch_size, verbose=self.verbose)
     if self.nb_classes>1:
         ypred = np_utils.probas_to_classes(ypred)
     else:
         ypred = ypred.flatten()
     return ypred
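Example #5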
    def on_epoch_end(self, epoch, logs={}):
        self.train_losses.append(mse(self.y_train, self.model.predict(self.X_train)))
        self.val_losses.append(logs.get('val_loss'))        

        if self.score_func == 'accuracy':
            true_train = np_utils.probas_to_classes(self.y_train)
            pred_train = np_utils.probas_to_classes(self.model.predict(self.X_train))
            self.add_train_scores.append(accuracy_score(true_train, pred_train))

            true_test = np_utils.probas_to_classes(self.y_test)
            pred_test = np_utils.probas_to_classes(self.model.predict(self.X_test))
            val_score = accuracy_score(true_test, pred_test)
            self.add_val_scores.append(val_score)
        elif self.score_func == 'r2_score':
            val_score = r2_score(self.y_test, self.model.predict(self.X_test))
            self.add_val_scores.append(val_score)
            self.add_train_scores.append(r2_score(self.y_train, self.model.predict(self.X_train)))    
            
        self.best_score = max(self.best_score, val_score)
        self.printCurrentStage(epoch)
Example #6
def recognize(chars):
    N = len(chars)
    width, height = chars[0].shape

    chars = numpy.asarray(chars, dtype=numpy.float32).reshape((N, 1, width, height)) / 255.0

    # chinese_classes = np_utils.probas_to_classes(chinese_model.predict(chars[:1], batch_size=1))
    alnum_classes = np_utils.probas_to_classes(alnum_model.predict(chars[1:], batch_size=6))

    return ['浙'] + \
           [alnum_labels[cls] for cls in alnum_classes]
Example #7
def predict(model, x, y, ix, output_dir):
    """
    Store predictions in a CSV file and predicted probabilities in an NPZ file.
    """

    y_proba_pred = model.predict(x)
    np.savez_compressed(output_dir + '/predictions_proba.npz',
        y_proba_pred=y_proba_pred)

    df = pd.DataFrame({
        'y_pred': np_utils.probas_to_classes(y_proba_pred),
        'y_true': np_utils.categorical_probas_to_classes(y)})

    df['accurate'] = df['y_true'] == df['y_pred']

    df['split'] = ''
    for key, indexes in ix.items():
        df.ix[indexes, 'split'] = key

    df = df[['split', 'y_true', 'y_pred', 'accurate']]

    df.to_csv(output_dir + '/predictions.csv', index=None)

    return y_proba_pred
Example #8
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--csv_in', required=True, help='CSV input filename')
    parser.add_argument('--csv_out', required=True, help='CSV output filename--do NOT confuse with csv_in')
    parser.add_argument('--video_in', required=True, help='Video input filename')
    parser.add_argument('--num_frames', type=int, default=1000, help='Number of frames to use to form training and test set. \
                    Default: 1000')
    parser.add_argument('--frame_interval', type=int, default=30, help='Number of frames to skip when creating training and \
                    test sets. Must be greater than 0. Default: 30')
    parser.add_argument('--object', required=True, help='Object(s) to classify. Multiple values should be separated by a comma \
                    (e.g., person,car)')
    parser.add_argument('--scale', type=float, default=0.1, help='Scale factor applied to each frame. Default: 0.1')
    parser.add_argument('--sample', dest='sample_data', action='store_true')
    parser.add_argument('--no_sample', dest='sample_data', action='store_false')
    parser.set_defaults(sample_data=True)
    parser.add_argument('--test_ratio', type=float, default=0.5, help='Ratio between test and training set. Default: 0.5')
    parser.add_argument('--models', default='lsvm',
        help='Type of model: Logistic Regression (lr), Linear SVM (lsvm), rbf SVM (rsvm), or polynomial SVM (psvm). \
                    Multiple values must be separated by comma (e.g., lr,lsvm). Default: lsvm')
    parser.add_argument('--class_weight_factor', type=float, default=1.0,
        help='How much to increase weight of positive training examples, and decrease weight of negative examples. \
                    Higher -> fewer false negatives, more false positives. Default: 1.0')
    args = parser.parse_args()
    csv_out = args.csv_out
    if args.frame_interval <= 0:
        import sys
        print '--frame_interval must be greater than 0'
        sys.exit(1)
    print args
    models_to_try = args.models.strip().split(',')
    args_dict = args.__dict__
    del(args_dict['models'])
    del(args_dict['csv_out'])
    init_header, init_row = zip(*sorted(list(args_dict.iteritems())))
    init_header, init_row = list(init_header), list(init_row)

    print 'Retrieving %d frames from %s' % (args.num_frames, args.video_in)
    video_frames = VideoUtils.get_all_frames(args.num_frames, args.video_in, scale=args.scale,
            interval=args.frame_interval)

    print 'Retrieving %d labels from %s' % (args.num_frames, args.csv_in)
    Y = DataUtils.get_binary(args.csv_in, [args.object], limit=args.num_frames, interval=args.frame_interval)
    Y = Y.flatten()

    if args.sample_data:
        print 'Partitioning training and test sets using random sampling'
        train_ind, test_ind, Y_train, Y_test = train_test_split(np.arange(len(Y)), Y, test_size=args.test_ratio)
    else:
        print 'Partitioning training and test sets using simple strategy: first \
        %0.2f are training, remaining %0.2f are test' % (1-args.test_ratio, args.test_ratio)
        split_index = int(len(Y)*(1 - args.test_ratio))
        inds = np.arange(len(Y))
        train_ind, test_ind, Y_train, Y_test = inds[:split_index], inds[split_index:], Y[:split_index], Y[split_index:]
    print '(train) positive examples: %d, total examples: %d' % \
        (np.count_nonzero(np_utils.probas_to_classes(Y_train)),
         len(Y_train))
    print '(test) positive examples: %d, total examples: %d' % \
        (np.count_nonzero(np_utils.probas_to_classes(Y_test)),
         len(Y_test))
    class_weights = DataUtils.get_class_weights(Y_train, args.class_weight_factor)
    print 'Class weights:', class_weights

    rows = []
    print 'Getting features....'
    X = get_features(video_frames)
    X_train, X_test = X[train_ind], X[test_ind]
    for model_type in models_to_try:
        headers = init_header[:]
        row = init_row[:]
        headers.append('model')
        row.append(model_type)
        print model_type
        model = get_model(model_type, X_train, Y_train, class_weights)
        print 'evaluating on training set'
        train_metrics = evaluate_model(model, X_train, Y_train)
        for key, val in train_metrics:
            headers.append('train ' + key)
            row.append(val)
        print train_metrics
        print 'evaluating on test set'
        test_metrics = evaluate_model(model, X_test, Y_test)
        for key, val in test_metrics:
            headers.append('test ' + key)
            row.append(val)
        print test_metrics
        rows.append(row)
    output_csv(csv_out, np.array(rows), np.array(headers))
Example #9
def create_model(word_to_id,
                 train_data,
                 dev_data,
                 test_data,
                 embedding_layer=None):

    id_to_word = {jj: ii for ii, jj in word_to_id.items()}
    word_num = len(id_to_word)

    X = []
    Y = []
    for sentence in train_data:
        # app = False
        # for wordtag in sentence['tags']:
        #     if wordtag!=1:
        #         app = True
        #         break
        # if app:
        X.append(sentence['words'])
        Y.append(sentence['tags'])
    newY = []
    for sent in Y:
        sent2 = []
        for wtag in sent:
            xx = np.zeros(4)
            xx[wtag - 1] = 1
            sent2.append(xx)
        newY.append(sent2)

    X, Y = np.array(X, dtype='int32'), np.array(newY, dtype='int32')
    # X, X_test, Y, Y_test = cross_validation.train_test_split(X, Y, test_size=0.1, random_state=123)
    print(X.shape[0])
    print(Y.shape[0])
    seqlen = 50
    # Y = Y.reshape((-1, seqlen, 1))

    seq_input = Input(shape=(seqlen, ), dtype='int32')
    if embedding_layer is not None:
        embedded = embedding_layer(seq_input)
        print('Loading pretrained embedding')
    else:
        print('Default embedding')
        embedded = Embedding(len(id_to_word) + 1,
                             50,
                             input_length=seqlen,
                             dropout=0.2)(seq_input)

    forwards = LSTM(128, return_sequences=True)(embedded)
    backwards = LSTM(128, return_sequences=True, go_backwards=True)(embedded)
    merged = merge([forwards, backwards], mode='concat', concat_axis=-1)
    after_dp = Dropout(0.2)(merged)

    ## Convoluntion1D Layer
    half_window_size = 2
    paddinglayer = ZeroPadding1D(padding=half_window_size)(embedded)
    conv = Conv1D(nb_filter=50,
                  filter_length=(2 * half_window_size + 1),
                  border_mode='valid')(paddinglayer)
    conv_d = Dropout(0.1)(conv)
    dense_conv = TimeDistributed(Dense(50))(conv_d)
    ## Concat Bi-LSTM Layer and Convolution Layer
    rnn_cnn_merge = merge([after_dp, dense_conv], mode='concat', concat_axis=2)

    outputs = TimeDistributed(Dense(4, activation='softmax'))(rnn_cnn_merge)
    #output = TimeDistributed(Dense(1,activation='sigmod'))
    model = Md(input=seq_input, output=outputs)
    with open('model/model_cnn.json', 'w') as fout:
        fout.write(model.to_json())
    modelfile = './model/model_cnn.h5'

    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    with open('model/model_cnn.yaml', 'w') as fout:
        fout.write(model.to_yaml())

    nb_epoch = 200  # default epoch budget; also covers X.shape[0] <= 5000, which was left undefined before
    if X.shape[0] > 10000: nb_epoch = 150
    if X.shape[0] > 40000: nb_epoch = 10
    if X.shape[0] > 70000: nb_epoch = 7
    if X.shape[0] > 100000: nb_epoch = 4
    model.fit(X,
              Y,
              batch_size=256,
              callbacks=[ModelCheckpoint(modelfile, save_best_only=True)],
              validation_split=0.1,
              nb_epoch=nb_epoch)

    print("###########     dev        ##############")

    X_dev = []
    Y_dev = []
    for sentence in dev_data:
        X_dev.append(sentence['words'])
        Y_dev.append(sentence['tags'])
    tempY = []
    for sent in Y_dev:
        sent2 = []
        for wtag in sent:
            xx = np.zeros(4)
            xx[wtag - 1] = 1
            sent2.append(xx)
        tempY.append(sent2)

    X_dev, Y_dev = np.array(X_dev, dtype='int32'), np.array(tempY,
                                                            dtype='int32')

    print(X_dev.shape[0])
    print(Y_dev.shape[0])

    loss_and_metrics = model.evaluate(X_dev, Y_dev, batch_size=128)
    print(loss_and_metrics)

    X_test = []
    Y_test = []
    for sentence in test_data:
        X_test.append(sentence['words'])
        Y_test.append(sentence['tags'])
    teY = []
    for sent in Y_test:
        sent2 = []
        for wtag in sent:
            xx = np.zeros(4)
            xx[wtag - 1] = 1
            sent2.append(xx)
        teY.append(sent2)

    X_test, Y_test = np.array(X_test, dtype='int32'), np.array(teY,
                                                               dtype='int32')

    print("###########     test        ##############")
    print(X_test.shape[0])
    print(Y_test.shape[0])
    Z = model.predict(X_test, batch_size=256, verbose=1)
    print(Z.shape)

    tZ = np_utils.probas_to_classes(Z.reshape((-1, 1)))
    tY = np_utils.probas_to_classes(Y_test.reshape((-1, 1)))

    TP = (tZ + tY == 2).sum()
    PP = (tZ == 1).sum()
    RR = (tY == 1).sum()
    prec, reca = TP / PP, TP / RR
    F1 = 2 * prec * reca / (prec + reca)
    ret = 'P=%d/%d %.5f\tR=%d/%d %.5f\tF1=%.5f' % (TP, PP, prec, TP, RR, reca,
                                                   F1)
    print(ret)
    show = True
    if show:
        amount = 0
        entityNum = 0
        findentityNum = 0
        score = 0
        with open('./results/ner_cnn.txt', 'w', encoding='utf-8') as fout:
            for x, y, z in zip(X_test, Y_test, Z):
                for xx, yy, zz in zip(x, y, z):
                    tagN = 0
                    for i in range(4):
                        if yy[i] == 1:
                            tagN = i + 1
                            break
                    tagY = id_to_tag[tagN]
                    maxpro = 0
                    otagN = 0
                    for j in range(4):
                        if zz[j] > maxpro:
                            maxpro = zz[j]
                            otagN = j + 1
                    tagZ = id_to_tag[otagN]
                    word = id_to_word[xx]
                    if word != '<PAD>':
                        amount = amount + 1
                        if ((tagY == 'ORG') or (tagY == 'LOC')
                                or (tagY == 'PER')):
                            entityNum = entityNum + 1
                        if tagY == tagZ:
                            score = score + 1
                            if ((tagY == 'ORG') or (tagY == 'LOC')
                                    or (tagY == 'PER')):
                                findentityNum = findentityNum + 1
            # #wd = ee.id2wd[wid]
            # wd = tt
            # if zz > 0.5: wd = wd + '@%.3f' % zz
            # es.append(wd)
            # if wid == 0: break
                        fout.writelines([
                            str(word) + '  ' + str(tagY) + '   ' + str(tagZ) +
                            '\n'
                        ])
    print('Num of Character : ' + str(amount))
    print('Num of Character NER : ' + str(score))
    print('Precision ' + str(score / amount))
    print('Num of entity : ' + str(entityNum))
    print('Num of entity NER : ' + str(findentityNum))
    print('NER accuracy : ' + str(findentityNum / entityNum))
Example #10
    model.add(Dropout(0.5))

## Second hidden layer.
model.add(Dense(second_layer_width))
model.add(Activation("relu"))
if dropout_rate > 0:
    model.add(Dropout(0.5))

model.add(Dense(second_layer_width))
model.add(Activation("relu"))
if dropout_rate > 0:
    model.add(Dropout(0.5))

model.add(Dense(y_train.shape[1]))
## For classification, the activation is softmax
model.add(Activation('softmax'))
## Define optimizer. In this tutorial/codelab, we select SGD.
## You can also use other methods, e.g., opt = RMSprop()
opt = SGD(lr=learning_rate, clipnorm=5.)
## Define loss function = 'categorical_crossentropy' or 'mean_squared_error'
model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"])

model.fit(scaled_x_train, y_train, nb_epoch=20, batch_size=32)

# Final test predict
test_proba = model.predict(scaled_test)
print(test_proba.shape)
print(test_proba[:5])
test_classes = np_utils.probas_to_classes(test_proba)
print(test_classes.shape)
print(test_classes[:5])
Example #11
	def _predict(self, X_test):
		proba = self.predict_proba(X_test)
		return np_utils.probas_to_classes(proba)
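Note: np_utils.probas_to_classes was removed in Keras 2, so thin wrappers like the one above need a replacement. A minimal NumPy sketch of the old behaviour (the helper name probas_to_classes_np is ours) is:

import numpy as np

def probas_to_classes_np(y_pred):
    # softmax / one-hot probabilities: take the argmax over the class axis
    if y_pred.ndim > 1 and y_pred.shape[1] > 1:
        return np.argmax(y_pred, axis=1)
    # single sigmoid output: threshold at 0.5
    return (np.ravel(y_pred) > 0.5).astype('int32')

For categorical outputs this is simply np.argmax(proba, axis=-1), which is the usual drop-in replacement in Keras 2 code.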
Example #12
    rms_optimizer = RMSprop()
    model.compile(loss='categorical_crossentropy', optimizer=rms_optimizer, class_mode="categorical", theano_mode='FAST_COMPILE')

    print("Train...")
    model.fit(X_train, y_train_cat, batch_size=batch_size, nb_epoch=nb_epoch, validation_split=0.1, show_accuracy=True)
    score = model.evaluate(X_test, y_test_cat, batch_size=batch_size)
    print('Test score:', score)

    classes_proba = model.predict_proba(X_test, batch_size=batch_size)
    for i in range(5):
        probs = sorted(zip(range(len(classes_proba)), classes_proba[i].tolist()), key=lambda x: x[1], reverse=True)
        print('Test sample %d (Correct label: %s)' % (i, id_to_word[y_test[i]]))
        for j, p in probs[:5]:
            print(id_to_word[j].ljust(20) + ': ' + str(p))

    classes = np_utils.probas_to_classes(classes_proba)

    correct, wrong = 0, 0
    for (i,q) in enumerate(test_questions):
        options = q[5]
        options_probs = classes_proba[i][options]
        best_idx = np.argmax(options_probs)
        predicted = options[best_idx]
        print('Test sample %d (Correct label: %s)' % (i, id_to_word[y_test[i]]))
        for k in range(len(options)):
            print(id_to_word[options[k]].ljust(20) + ': ' + str(options_probs[k]))

        if predicted == y_test[i]:
            correct += 1
        else:
            wrong += 1
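Example #13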
        h = F.relu(self.l1(x))
        return self.l2(h)


class Classifier(Chain):
    def __init__(self, predictor):
        super(Classifier, self).__init__(predictor=predictor)

    def __call__(self, x, t):
        y = self.predictor(x)
        self.loss = F.softmax_cross_entropy(y, t)
        self.accuracy = F.accuracy(y, t)
        return self.loss


trY, teY = np_utils.probas_to_classes(trY).astype(np.int32), np_utils.probas_to_classes(teY).astype(np.int32)

# net = MNISTNet()
# x = chainer.Variable(trX)
# print net(x)

model = L.Classifier(MNISTNet())
optimizer = optimizers.SGD()
optimizer.setup(model)

# x = chainer.Variable(trX)
# t = chainer.Variable(trY)
# print model(x, t)

teX = chainer.Variable(teX)
teY = chainer.Variable(teY)
Example #14
# use decoder to decode latent space information into images

z_in = layers.Input(shape=(L_SIZE, ))
y_in = layers.Input(shape=(OUT_SIZE, ))
decoder_in = layers.merge([z_in, y_in], mode='concat', concat_axis=1)
decoder_hidden = P_(decoder_in)
decoder_out = f_(decoder_hidden)
DECODER = Model([z_in, y_in], decoder_out)
"""
	PLOTTING
"""
# ensure that encoder and decoder have a .predict() function on each.
x_encoded = ENCODER.predict([X_train, Y_train], batch_size=BATCH_SIZE)
f, ax = utils.data_on_latent_space(x_encoded,
                                   np_utils.probas_to_classes(Y_train))
# f.colorbar()

f.savefig('latent_viz.cvae.pdf')

n_digits = 10
n = 5
latent_range = (0.05, 0.95)
digit_shape = (28, 28)

figure = np.zeros((digit_shape[0] * n_digits, digit_shape[1] * n))

for i in range(1, n_digits + 1):
    digit_code = np.zeros(
        n_digits)  # encode the digit we are trying to generate
    digit_code[i - 1] = 1
Example #15
    classifier.summary()
    classifier.fit(X_train,
                   to_categorical(y_train,
                                  len(y_values) + 1),
                   batch_size=BATCH_SIZE,
                   class_weight=class_weight,
                   verbose=1,
                   nb_epoch=EPOCHS)

    logger.debug(classifier)

    logger.info("Reading test data")
    X_dev, y_dev_gold, _, entities = read_and_map(dev_src, mapper, y_values)

    logger.info("Testing")
    y_dev_pred = probas_to_classes(classifier.predict(X_dev))

    # output entities
    with open(os.path.join(output_dir, "entities.txt"), "w") as f:
        for entity in entities:
            f.write(str(entity) + '\n')

    # output gold labels
    with open(os.path.join(output_dir, "gold.txt"), "w") as f:
        for i in xrange(len(y_dev_gold)):
            f.write(y_values[y_dev_gold[i]] + '\n')

    # output predicted labels
    with open(os.path.join(output_dir, "pred.txt"), "w") as f:
        for i in xrange(len(y_dev_pred)):
            f.write(y_values[y_dev_pred[i]] + '\n')
Example #16
def train(model_type='parallel', label_set='full', drop_unk=False,
          word_vecs=None, setup_only=False):
    print "Loading data..."
    df = sentences_df(SENTENCES_CSV, labels=label_set, drop_unk=drop_unk)
    X, y, word2idx, l_enc = load_dataset(df, pad=True)
    print "X shape:", X.shape
    y_orig = y
    y_binary = to_categorical(y)
    labels = np.unique(y_orig)
    nb_labels = labels.shape[0]
    if drop_unk:
        label_set_str = label_set + ' (-unk)'
    else:
        label_set_str = label_set
    print "Number of labels: %i [%s]" % (nb_labels, label_set_str)
    if nb_labels > 2:
        y = y_binary
    maxlen = X.shape[1]
    vocab_size = len(word2idx) + 1 # 0 masking
    if pretrained_embeddings is True:
        word_vectors = load_bin_vec(word_vecs, word2idx)
        add_unknown_words(word_vectors, word2idx)
        embedding_weights = np.zeros((vocab_size+1, emb_dim))
        for word, index in word2idx.items():
            embedding_weights[index,:] = word_vectors[word]
    else:
        embedding_weights = None
    print "Data loaded."

    if setup_only:
        cnn = create_model(vocab_size, nb_labels, emb_dim, maxlen,
                           embedding_weights, filter_hs, nb_filters,
                           dropout_p, trainable_embeddings,
                           pretrained_embeddings, model_type=model_type)
        return {'X': X,
                'y': y,
                'word2idx': word2idx,
                'l_enc': l_enc,
                'y_binary': y_binary,
                'labels': labels,
                'nb_labels': nb_labels,
                'maxlen': maxlen,
                'emb_dim': emb_dim,
                'vocab_size': vocab_size,
                'embedding_weights': embedding_weights,
                'cnn': cnn}

    params = [('filter_hs',filter_hs), ('nb_filters',nb_filters),
              ('dropout_p',dropout_p),
              ('trainable_embeddings',trainable_embeddings),
              ('pretrained_embeddings',pretrained_embeddings),
              ('batch_size',batch_size), ('nb_epoch',nb_epoch),
              ('lr',lr), ('beta_1',beta_1), ('beta_2',beta_2),
              ('epsilon',epsilon)]
    print "\nModel type: %s" % model_type
    for (name, value) in params:
        print name + ':', value

    skf = StratifiedKFold(y_orig, n_folds=10, shuffle=True, random_state=0)
    cv_scores = []
    for i, (train, test) in enumerate(skf):
        start_time = time.time()
        cnn = None
        cnn = create_model(vocab_size,
                           nb_labels,
                           emb_dim,
                           maxlen,
                           embedding_weights,
                           filter_hs,
                           nb_filters,
                           dropout_p,
                           trainable_embeddings,
                           pretrained_embeddings,
                           model_type=model_type)
        if i == 0:
            print_summary(cnn.model.layers)

        acc = train_and_test_model(cnn, X[train], y[train], X[test], y[test],
                                   batch_size, nb_epoch,
                                   lr, beta_1, beta_2, epsilon)
        cv_scores.append(acc)
        train_time = time.time() - start_time
        print('\nLabel frequencies in y[test]')
        print_label_frequencies((y_orig[test], l_enc))
        y_pred = cnn.model.predict(X[test])
        y_pred = probas_to_classes(y_pred)
        c = Counter(y_pred)
        total = float(len(y_pred))
        print('\nLabel frequencies in predict(y[test])')
        for label, count in c.most_common():
            print l_enc.inverse_transform(label), count, count / total
        print "fold %i/10 - time: %.2f s - acc: %.4f on %i samples" % \
            (i+1, train_time, acc, len(test))
    print "Avg cv accuracy: %.4f" % np.mean(cv_scores)
Example #17
    model.add(Dense(out_embedding_size, num_words))
    model.add(Activation('softmax'))

    sgd_optimizer = SGD(lr=0.006, momentum=0.9, decay=0.99, nesterov=True)
    adg_optimizer = Adagrad()
    rms_optimizer = RMSprop()
    model.compile(loss='categorical_crossentropy', optimizer=rms_optimizer, class_mode="categorical", theano_mode='FAST_COMPILE')

    print("Train...")
    model.fit(X_train, y_train_cat, batch_size=batch_size, nb_epoch=nb_epoch, validation_split=0.1, show_accuracy=True)
    score = model.evaluate(X_test, y_test_cat, batch_size=batch_size)
    print('Test score:', score)

    classes_proba = model.predict_proba(X_test, batch_size=batch_size)
    for i in range(5):
        probs = sorted(zip(range(len(classes_proba)), classes_proba[i].tolist()), key=lambda x: x[1], reverse=True)
        print('Test sample %d (Correct label: %s)' % (i, id_to_word[y_test[i]]))
        for j, p in probs[:5]:
            print(id_to_word[j].ljust(20) + ': ' + str(p))

    classes = np_utils.probas_to_classes(classes_proba)
    acc = np_utils.accuracy(classes, y_test)
    print('Test accuracy:', acc)

    # print(classes.shape)
    # print(classes[0])
    # print(y_test[0])

    # classes_list = classes.tolist()
    # print(map(lambda x: id_to_word[x], classes_list[:25]))
Example #18
# finally, an output layer with one node per class
model.add(Dense(num_labels))
model.add(Activation('softmax'))

# use the Adam optimiser
adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)

# now compile the model, Keras will take care of the Tensorflow boilerplate
model.compile(loss='categorical_crossentropy',
              metrics=['accuracy'],
              optimizer=adam)

# for quicker training, just using one epoch, you can experiment with more
model.fit(train_x,
          train_y,
          validation_data=(valid_x, valid_y),
          batch_size=32,
          nb_epoch=1)

# finally, evaluate the model using the withheld test dataset

# determine the ROC AUC score
y_prob = model.predict_proba(test_x, verbose=0)
y_pred = np_utils.probas_to_classes(y_prob)
y_true = np.argmax(test_y, 1)
roc = metrics.roc_auc_score(test_y, y_prob)
print "ROC:", round(roc, 3)

# determine the classification accuracy
score, accuracy = model.evaluate(test_x, test_y, batch_size=32)
print("\nAccuracy = {:.2f}".format(accuracy))
        h = F.relu(self.l1(x))
        return self.l2(h)


class Classifier(Chain):
    def __init__(self, predictor):
        super(Classifier, self).__init__(predictor=predictor)

    def __call__(self, x, t):
        y = self.predictor(x)
        self.loss = F.softmax_cross_entropy(y, t)
        self.accuracy = F.accuracy(y, t)
        return self.loss


trY, teY = np_utils.probas_to_classes(trY).astype(
    np.int32), np_utils.probas_to_classes(teY).astype(np.int32)

# net = MNISTNet()
# x = chainer.Variable(trX)
# print net(x)

model = L.Classifier(MNISTNet())
optimizer = optimizers.SGD()
optimizer.setup(model)

# x = chainer.Variable(trX)
# t = chainer.Variable(trY)
# print model(x, t)

teX = chainer.Variable(teX)
teY = chainer.Variable(teY)
Example #20
def train_model(model_generator, feature_set):

    # TODO get data function that returns dict (options for one hot or not, val or not)
    # load dataset
    paths = u.load_paths('PATHS.yaml')  # get paths from file
    train_x, val_x, test_x = u.load_data(paths['extracted_data'] +
                                         'features_%s.p' % feature_set)
    train_y, val_y, test_y = u.load_data(paths['extracted_data'] +
                                         'labels_%s.p' % feature_set)

    model_name, model, training = model_generator(n_dim=train_x.shape[1:],
                                                  n_labels=test_y.shape[1])
    run_id = '%s_%s' % (model_name, datetime.datetime.now().isoformat())
    print('\nTrain and Evaluate: %s' % model_name)

    # callbacks
    earlystop = EarlyStopping(monitor='val_loss',
                              patience=training.early_stop_patience,
                              verbose=1,
                              mode='auto')
    log_dir = os.path.join(paths['tensorboard_logs'], run_id)
    tensorboard = TensorBoard(log_dir=log_dir,
                              histogram_freq=3,
                              write_graph=True)
    t0 = time.time()
    history = model.fit(train_x,
                        train_y,
                        validation_data=(val_x, val_y),
                        callbacks=[earlystop, tensorboard],
                        nb_epoch=training.n_epoch,
                        batch_size=training.batch_size)
    training_time = time.time() - t0

    # test
    y_prob = model.predict_proba(test_x, verbose=0)
    y_pred = np_utils.probas_to_classes(y_prob)
    y_true = np.argmax(test_y, 1)

    # evaluate the model's accuracy
    t0 = time.time()
    score, accuracy = model.evaluate(test_x,
                                     test_y,
                                     batch_size=training.batch_size)
    testing_time = time.time() - t0
    cm = confusion_matrix(y_true, y_pred, labels=None)
    # p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average='micro')
    # roc = roc_auc_score(test_y, y_prob)
    # print("F-Score:", round(f, 2))  # similar value to the accuracy score, but useful for cross-checking
    # print("ROC:", round(roc, 3))

    # print results
    print("\nclassifier: %s" % model_name)
    print("training time: %0.4fs" % training_time)
    print("testing time: %0.4fs" % testing_time)
    print("accuracy: %0.4f" % accuracy)
    print("confusion matrix:\n%s" % cm)
    # print model.summary()

    # plot and save results
    fname = paths['model_save'] + model_name + '_accuracy_%0.2f' % accuracy
    u.plot_keras_loss(fname, history)  # saves plot png
    model.save(fname + '.h5')
    cm_path = './confusion_plots/%s' % model_name
    cm_title = '%s (Accuracy: %0.2f)' % (model_name, accuracy)
    u.plot_confusion_matrix(cm, cm_path, title=cm_title)
Example #21
File: keras_tools.py Project: jhayes14/Num
 def predict(self, Xtest):
     ypred = self.model.predict(Xtest, batch_size=self.batch_size, verbose=self.verbose)
     return np_utils.probas_to_classes(ypred)
Example #22
 def print_class_numbers(Y, nb_classes):
     classes = np_utils.probas_to_classes(Y)
     for i in xrange(nb_classes):
         print 'class %d: %d' % (i, np.sum(classes == i))
Example #23
 def predict_class_label(self, audio_file):
     x_features = self.load_features(audio_file)
     instrument_class = np_utils.probas_to_classes(self.model.predict(x_features, verbose=0))[0]
     label = self.instr_family_le.inverse_transform(instrument_class)
     return label
Example #24
model.save('digitRecognitionModel.h5')  # creates a HDF5 file 'my_model.h5'

#pre=x_test[0]

#==============================================================================
# pre=np.array([pre])
# #print pre.shape[0]
# predictedValue =model.predict(pre)
# print predictedValue
# rounded = [round(x) for x in predictedValue[0]]
# print rounded
# predictedClass=np_utils.probas_to_classes(predictedValue)
# print predictedClass
#==============================================================================

#######################prediction of image file#########################

originalImage = Image.open('digitsImages/5.bmp')
reducedImage = originalImage.resize((28,28))
#plt.imshow(reducedImage ,cmap=plt.get_cmap('gray'))
imageArray= np.array(reducedImage)
numPixels=imageArray.shape[0]*imageArray.shape[1]
imageArray=imageArray.reshape(1,numPixels).astype('float32')

predictedValue =model.predict(imageArray)
predictedClass=np_utils.probas_to_classes(predictedValue)
print "Digit is ",predictedClass


    
    
Example #25
]

input_test = [
    subject_idx_test, question_idx_test, comment_idx_test,
    subj_overlap_idx_test, ques_overlap_idx_test, comm_overlap_idx_test,
    overlap_feat_test
]

print('Load Trained Model')
model = model_from_json(open(base_model_path).read())
model.load_weights(current_trained_model_path)

y_pred_train = model.predict(input_train)
y_pred_test = model.predict(input_test)

y_pred_test_cls = probas_to_classes(y_pred_test)

ofile = open('data_folder/semeval2016-task3-taskA-pred.tsv', 'wb')
for i, (qid, cid) in enumerate(zip(relq_id_test, relc_id_test)):
    p = y_pred_test_cls[i]
    if p == 0:
        label = 'false'
    elif p == 1:
        label = 'true'
    proba = y_pred_test[i][1]

    outline = '{}\t{}\t{}\t{}\t{}\n'.format(qid, cid, 0, proba, label)
    ofile.write(outline)
ofile.close()

train_results = defaultdict(lambda: [])
Example #26
 def predict_class_label(self, audio_file):
     x_features = self.load_features(audio_file)
     instrument_class = np_utils.probas_to_classes(
         self.model.predict(x_features, verbose=0))[0]
     label = self.instr_family_le.inverse_transform(instrument_class)
     return label
Example #27
print('Load Supervised  Model')
model.load_weights(super_weight_path)

for tids, X_test, y_test, name in test_sets:
    raw_data = open(os.path.join('semeval', '{}.tsv'.format(name)),
                    'r').readlines()
    raw_data = map(lambda x: x.replace('\n', '').split('\t'), raw_data)
    raw_tweets = map(lambda x: (x[0], x[-1]), raw_data)
    raw_lables = map(lambda x: (x[0], x[-2]), raw_data)

    raw_data_dict = dict(raw_tweets)
    raw_lables_dict = dict(raw_lables)
    ofile = open(os.path.join(res_dir, name), 'w')

    y_pred = model.predict(X_test)
    y_pred = probas_to_classes(y_pred)
    score = semeval_f1_taskA(y_test, y_pred)
    scores = f1_score(y_test, y_pred, average=None)
    output += '{}: {}\t'.format(name, score)
    output += 'neg_f1: {}\t neut_f1: {}\t pos_f1: {}'.format(
        scores[0], scores[1], scores[2])

    for tid, label in zip(tids, y_pred):
        tweet = raw_data_dict[tid].replace('\n', '')
        truth = raw_lables_dict[tid]
        l = {0: 'negative', 1: 'neutral', 2: 'positive'}.get(label)
        outline = '{}\t{}\t{}\n'.format(tweet, truth, l)
        ofile.write(outline)

open(os.path.join('results', 'results_log.tsv'), 'a').write(output + '\n')
Example #28
              optimizer='sgd',
              metrics=['accuracy'])
model.fit(trainData,
          y_train,
          batch_size=batch_size,
          nb_epoch=nb_epoch,
          verbose=1,
          validation_data=(testData, y_test))
score = model.evaluate(testData, y_test, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
W, b = model.layers[0].get_weights()
# print('Weights=', W, '\n biases=', b)
y_test_pred = model.predict(testData)
print(
    np_utils.probas_to_classes(y_test)[:10],
    np_utils.probas_to_classes(y_test_pred)[:10])
'''test '''
predictFileName = "./data/test.csv"
x_pre = np.array(pd.read_csv(predictFileName))
print("x_pre shape", x_pre.shape)
y_pred = model.predict(x_pre)
pre_class = np_utils.probas_to_classes(y_pred)
print("predict class :", pre_class)
index = np.linspace(1, len(pre_class), len(pre_class))
print("index", index)

np.savetxt("./data/pre.csv",
           list(zip(index, pre_class)),
           delimiter=',',
           fmt='%10.5f')
Example #29
File: CNN_SA.py Project: rippleblue/Python
for train_index,valid_index in kfold.split(np.array([0]*3000),np.array([0]*3000)):
	print('<<<<<COUNT>>>>> '+str(count))
	model = Model(input = [input1], output=[output])
	# load model weight
	CNN_model = model.load_weights('model(0.9375).h5')
	model.compile(loss='binary_crossentropy', optimizer = "adam", metrics=['accuracy'])
	loss, accuracy = model.evaluate(train[valid_index], train_label[valid_index], verbose=0)		
	valid_score.extend([accuracy])
	print('loss: '+str(loss)+', acc: '+str(accuracy))
	count = count+1
print("valid: %.2f%% (+/- %.2f%%)" % (np.mean(valid_score), np.std(valid_score)))	
"""

"""predict"""
predictions = model.predict({'input1':test})
POS_predict_keras = np_utils.probas_to_classes(predictions)
pre = np.sum(test_label[:,1]==POS_predict_keras)/len(POS_predict_keras)
print(pre)
 					

"""save result"""
model.save('CNN_model.h5')

import csv
f = open("POS_predict_keras.csv", "w")
writer = csv.writer(f, lineterminator="\n")
writer.writerow(POS_predict_keras)
f.close()

#######
'''