def init_network(self):
        input_var = theano.tensor.tensor4('input')
        network = cnn.build_model(input_var)['prob']
        # load pretrained weights and push them into the network
        base_data = np.load(self.weigths_file)
        base_data = base_data[list(base_data.keys())[0]]  # list() needed on Python 3
        lasagne.layers.set_all_param_values(network, base_data)

        x = theano.tensor.tensor4('x')
        y = lasagne.layers.get_output(network, x, deterministic=True)
        return theano.function([x], y, allow_input_downcast=True)
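
# Hedged usage sketch (not part of the original snippet): calling the compiled
# predictor returned above. 'Recognizer' and the 1 x 48 x 48 grayscale input
# shape (borrowed from the later examples on this page) are assumptions.
# predict_fn = Recognizer().init_network()
# batch = np.zeros((1, 1, 48, 48), dtype='float32')
# probs = predict_fn(batch)        # softmax probabilities, shape (1, num_classes)
# print(probs.argmax(axis=1))      # most likely class index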
Example #2
    data_x = np.array(x)
    # stack images into an N x 1 x 48 x 48 grayscale batch
    data_x = data_x.reshape(len(data_x), 1, 48, 48)
    return data_x


'''
Given the model and data_x, find the most probable class for each sample and print it.
'''


def recognize(model, decoder, data_x):
    print('\nrecognizing...')
    for x in data_x:
        x = x.reshape(1, 1, 48, 48)  # add a batch dimension for predict
        r = model.predict(x)
        index = np.argmax(r)
        print(decoder[index] + '\t-->\t' + str(r.max()))


if __name__ == '__main__':
    print('loading model...')
    model = cnn.build_model()
    model.load_weights('model.h5')
    decoder = cPickle.load(open('./decoder.pkl', 'rb'))
    print('loading model finished')

    recognize(model, decoder, load_data('./results/0'))
    recognize(model, decoder, load_data('./results/1'))
    recognize(model, decoder, load_data('./results/2'))
Example #3
        assert len(data.x[0]) == len(data.y)
    
    # save the data for cnn since it takes forever to generate
    # also save the concept dict order for faster prediction
    concept_order = uniq(concept.ids)
    data = [tr_data, val_data, concept_order]
    with open('gitig_new_data.pickle', 'wb') as f:
        pickle.dump(data, f, protocol=4)
    logger.info('Mentions and concepts saved.')
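
    # Hedged counterpart (assumption, not in the original script): the cache
    # written above can be reloaded in the same tuple order.
    # with open('gitig_new_data.pickle', 'rb') as f:
    #     tr_data, val_data, concept_order = pickle.load(f)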


# cnn
if not int(config['model']['use_saved_model']):    # train new model
    import cnn, model_tools
    cnn.print_input(tr_data)
    model = cnn.build_model(config,tr_data,vocabulary,pretrained)


    # select hardest training samples from preliminary training
    if config.getint('training','sample_hard'):
        import sp_training
        from datetime import datetime
        # from callback import EarlyStoppingRankingAccuracy
        # evaluation_function_1 = EarlyStoppingRankingAccuracy(config,val_data)
        from callback import EarlyStoppingRankingAccuracySpedUp
        evaluation_function = EarlyStoppingRankingAccuracySpedUp(config,val_data,concept.padded,corpus_dev.padded,pretrained)
        
        try:
            with open('gitig_new_tr_data_ratio.pickle', 'rb') as f:
                new_tr_data = pickle.load(f)
            logger.info('Using saved subsampled data')
        except OSError:
Example #4
    # sort by the second tuple element; Python 3 dropped cmp-style sorting
    data_x.sort(key=lambda item: item[1])
    x = [a for a, b in data_x]
    data_x = np.array(x)
    data_x = data_x.reshape(len(data_x), 1, 48, 48)
    return data_x

'''
Given the model and data_x, find the most probable class for each sample and print it.
'''
def recognize(model, decoder, data_x):
    print('\nrecognizing...')
    for x in data_x:
        x = x.reshape(1, 1, 48, 48)
        r = model.predict(x)
        index = np.argmax(r)
        print(decoder[index] + '\t-->\t' + str(r.max()))

if __name__ == '__main__':
    print('loading model...')
    model = cnn.build_model()
    model.load_weights('model.h5')
    decoder = cPickle.load(open('./decoder.pkl', 'rb'))
    print('loading model finished')

    recognize(model, decoder, load_data('./results/0'))
    recognize(model, decoder, load_data('./results/1'))
    recognize(model, decoder, load_data('./results/2'))
Example #5
test_images = test_images.reshape(-1, img_rows, img_cols, num_channels)
'''

# output dimensions
num_classes = 10

################################################################################
# callbacks for Save weights, Tensorboard
# creating a new directory for each run using timestamp
folder = os.path.join(os.getcwd(), datetime.now().strftime("%d-%m-%Y_%H-%M-%S"), str(ACTIV_FN))
history_file = os.path.join(folder, "cnn_" + str(ACTIV_FN) + ".h5")
save_callback = ModelCheckpoint(filepath=history_file, verbose=1)
tb_callback = TensorBoard(log_dir=folder)

# Build, train, and test model
model = cnn.build_model(input_shape, activation_fn, LEARNING_RATE, DROP_PROB, NUM_NEURONS_IN_DENSE_1, num_classes)
train_accuracy, train_loss, valid_accuracy, valid_loss = cnn.train_model(
    model, train_images, train_labels, BATCH_SIZE, NUM_EPOCHS,
    valid_images, valid_labels, save_callback, tb_callback)
test_accuracy, test_loss, predictions = cnn.test_model(model, test_images, test_labels)

# save test set results to csv
predictions = np.round(predictions)
predictions = predictions.astype(int)
df = pd.DataFrame(predictions)
df.to_csv("mnist.csv", header=None, index=None)
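
# Hedged aside (not the original pipeline): for single-label MNIST output the
# usual CSV export is the argmax class index rather than rounded one-hot rows.
# labels = predictions.argmax(axis=1)
# pd.DataFrame(labels).to_csv("mnist_labels.csv", header=None, index=None)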

################################################################################
# Visualization and Output
num_epochs_plot = range(1, len(train_accuracy) + 1)
Example #6
# input image dimensions
input_shape = x_train[0].shape  # per-sample shape: (rows, cols, channels)
# output dimensions
num_classes = 6

# callbacks for Save weights, Tensorboard
# creating a new directory for each run using timestamp
folder = os.path.join(os.getcwd(),
                      datetime.now().strftime("%d-%m-%Y_%H-%M-%S"))
history_file = os.path.join(folder, "cnn.h5")
save_callback = ModelCheckpoint(filepath=history_file, verbose=1)
tb_callback = TensorBoard(log_dir=folder)

# Build, train, and test model
model = cnn.build_model(input_shape=input_shape,
                        learn_rate=LEARNING_RATE,
                        drop_prob=DROP_PROB,
                        num_neurons=NUM_NEURONS_IN_DENSE_1)
train_accuracy, train_loss, valid_accuracy, valid_loss = cnn.train_model_datagen(
    model, datagen, val_datagen, x_train, y_train, BATCH_SIZE, NUM_EPOCHS,
    x_valid, y_valid, save_callback, tb_callback)
test_accuracy, test_loss, predictions = cnn.test_model(model, x_test, y_test)

# save test set results to csv
predictions = np.round(predictions)
predictions = predictions.astype(int)
df = pd.DataFrame(predictions)
df.to_csv("predictions_datagen.csv", header=None, index=None)

# Visualization and Output
num_epochs_plot = range(1, len(train_accuracy) + 1)  # x axis range
# Loss curves
Example #7
# HYPERPARAMETERS AND DESIGN CHOICES
num_neurons = 128
batch_size = 64
ACTIV_FN = "relu"
activation_fn = cnn.get_activ_fn(ACTIV_FN)
num_epochs = 5
max_count = 50
for count in range(0, max_count):
    # log-uniform sampling: learn_rate in [1e-4, 1e-2], drop_prob in [1e-2, 1]
    learn_rate = 10**uniform(-4, -2)
    drop_prob = 10**uniform(-2, 0)

    # callbacks for Save weights, Tensorboard
    # creating a new directory for each run using timestamp
    folder = os.path.join(os.getcwd(),
                          datetime.now().strftime("%d-%m-%Y_%H-%M-%S"),
                          str(ACTIV_FN))
    tb_callback = TensorBoard(log_dir=folder)

    # Build, train, and test model
    model = cnn.build_model(input_shape, activation_fn, learn_rate, drop_prob,
                            num_neurons, num_classes)
    train_accuracy, train_loss, valid_accuracy, valid_loss = cnn.train_model(
        model, train_images, train_labels, batch_size, num_epochs,
        valid_images, valid_labels, tb_callback)
    print(
        'Step: {:d}/{:d}, learn: {:.6f}, dropout: {:.4f}, '
        'Train_loss: {:.4f}, Train_acc: {:.4f}, Val_loss: {:.4f}, Val_acc: {:.4f}'
        .format(count, max_count, learn_rate, drop_prob, train_loss[-1],
                train_accuracy[-1], valid_loss[-1], valid_accuracy[-1]))
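
    # Hedged aside (not in the original script): a random search usually
    # records its best trial as it goes; a minimal sketch of that bookkeeping,
    # with 'best' initialized to {'val_acc': float('-inf')} before the loop:
    # if valid_accuracy[-1] > best['val_acc']:
    #     best = {'val_acc': valid_accuracy[-1], 'learn_rate': learn_rate,
    #             'drop_prob': drop_prob}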
Example #8
        if self.conf.getint('model', 'save'):
            callback.save_model(self.original_model,
                                self.conf['model']['path'], self.now)
        return

    def on_batch_end(self, batch, logs=None):
        # avoid a mutable default argument; Keras supplies logs at runtime
        self.losses.append((logs or {}).get('loss'))
        return


from model_tools import load_model, save_model
import cnn, model_tools

if sysargv1 == 'separate':
    if sysargv2 == 'full':
        model, entity_model, concept_model = cnn.build_model(
            config, tr_data, vocabulary, pretrained)
    elif sysargv2 == 'ablation':
        model, entity_model, concept_model = cnn.build_model_maxpool_ablation(
            config, tr_data, vocabulary, pretrained)
elif sysargv1 == 'shared':
    if sysargv2 == 'full':
        model, entity_model, concept_model = cnn.build_model_generator(
            config, vocabulary, pretrained)
    elif sysargv2 == 'ablation':
        model, entity_model, concept_model = cnn.build_model_shared_encoder_xDense(
            config, tr_data, vocabulary, pretrained)

dev_eval_function = EarlyStoppingRankingAccuracyGenerator(
    config, concept, positives_dev_truncated, vocabulary, entity_model,
    concept_model, model, real_val_data)
hist = model.fit(tr_data.x,
Example #9
    x_train = x[train_index]
    y_train = y[train_index]
    x_val = x[val_index]
    y_val = y[val_index]

    x_train, y_train = utils.apply_oversampling(x_train, y_train, oversample_val=oversample_val)
    x_train, y_train = utils.apply_undersampling(x_train, y_train, undersample_val=undersample_val)

    # initialize the output-layer bias to the log prior odds so the untrained
    # model's mean prediction matches the class imbalance
    neg, pos = np.bincount(y_train)
    output_bias = np.log(pos / neg)
    output_bias = keras.initializers.Constant(output_bias)
    print("Positive class count:", pos)
    print("Negative class count:", neg)

    model = cnn.build_model(input_shape=input_shape, layers=n_layers,
                            filters=n_filters, opt=opt, output_bias=output_bias)
    
    weightsFile = 'numSplit' + str(numSplit) + '_params' + str(parameterNum)

    history = cnn.train_model(model, x_train, y_train, x_val, y_val,
                              weightsDir, weightsFile,
                              patience_count=patience_count,
                              epochs=max_epochs,
                              batch_size=batch_size,
                              class_weights=class_weights)

    model.load_weights(weightsDir + weightsFile + '.h5')

    predictions = model.predict(x_val)
    cm = utils.calc_cm(y_val,predictions)
    precision, recall = utils.calc_binary_metrics(cm)
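
    # Hedged sketch of the metric math (assumption; the project's
    # utils.calc_binary_metrics may differ): with a confusion matrix
    #   cm = [[tn, fp], [fn, tp]]
    # precision = tp / (tp + fp)   # predicted positives that are correct
    # recall    = tp / (tp + fn)   # true positives actually recovered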
Example #10
length = 35
height = 35

test_degree = 45

alpha = fileprocess.read_alpha_shape("alpha_shape_points.csv")[test_degree]
alpha_graph = batch_prepare.single_alpha(alpha)

graphs = fileprocess.read_graph("relocated_points_all.csv")
test_graph, full_graph = batch_prepare.single_graph_add_window(
    length, height, graphs[test_degree])

model_path = "mnist_mlp_weights.h5"
#model_path = cnn.Result_path + "mnist_mlp_weights.h5"

model = cnn.build_model(dense_size, (35, 35, 1))
model.load_weights(model_path)

_result = model.predict(test_graph)   # one prediction per sliding window
label = np.argmax(_result, axis=1)
result = label.reshape((110, 120))    # arrange window labels back into the scan grid

fig, axes = plt.subplots(1, 3, figsize=(16, 9), sharex="col", sharey="row")
axes[0].imshow(full_graph, cmap=plt.get_cmap("Blues"))
axes[0].set_title("Original Points")

axes[1].imshow(result, cmap=plt.get_cmap("plasma"))
axes[1].set_title("CNN Result")
"""ax = plt.axes([0.5, 0.6, 0.3, 0.6])"""

axes[2].imshow(alpha_graph, cmap=plt.get_cmap("plasma"))