Example #1
def gen_train_val_data(sample_dir, train_rate, num_classes, condition_classes):
    # load every sample, then split the generated pairs according to train_rate
    train_data, train_label, valid_data, valid_label, train_n, valid_n, note_label = \
        load_image(sample_dir, 1.0).gen_train_valid()
    data_pairs = create_pairs(train_data, num_classes, condition_classes)
    train_num = int(train_n * train_rate)
    train_data_trip = data_pairs[:train_num]
    # the remaining pairs double as validation and test data
    val_data_trip = data_pairs[train_num:]
    test_data_trip = val_data_trip
    return train_data_trip, val_data_trip, test_data_trip
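create_pairs is not defined in this snippet; below is a hypothetical sketch only, assuming it builds labelled same-class/different-class pairs restricted to condition_classes (given the *_trip naming, the original may build triplets instead).

import random

# Hypothetical sketch of the create_pairs helper called above (not part of the
# original project). Assumptions: train_data is a sequence of (image, label)
# tuples and condition_classes lists the class ids that may be paired.
def create_pairs(train_data, num_classes, condition_classes):
    by_class = {c: [img for img, lbl in train_data if lbl == c]
                for c in range(num_classes)}
    pairs = []
    for c in condition_classes:
        for img in by_class[c]:
            # positive pair: two samples from the same class
            pairs.append((img, random.choice(by_class[c]), 1))
            # negative pair: a sample from another conditioned class
            other = random.choice([o for o in condition_classes if o != c])
            pairs.append((img, random.choice(by_class[other]), 0))
    return pairs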
Example #2
def norikae():
    if request.method == "POST":
        start = request.form.get('start')
        dest = request.form.get('dest')
        choice = request.form.get('options')
        date = request.form.get('date')
        input_time = date.split()[1].split(':')
        # print(date)
        # print(date.split()[1].split(':'))

        # CASE 1: start and dest are the same
        if start == dest:
            return "It's same whyyyy"
        else:
            # default values so render_template below never sees undefined names
            path, dist = None, None
            if choice == CHOICES[0]:
                # non-weighted graph: plain BFS over the station connections
                connect_list = utils.create_pairs(network)
                graph = utils.build_graph(connect_list, outtages)
                path, dist = bfs.print_bfs(graph, start, dest)

            elif choice == CHOICES[1]:
                pass
            elif choice == CHOICES[2]:
                # weighted graph: run Dijkstra over the timetable
                graph = utils.build_weighted_graph(timeSchedule)
                dijkstra.print_dfs(graph, start, dest)
                schedules = utils.process_timeJson(timeSchedule)
                path, dist = None, None

        # return redirect('/')
        return render_template('result.html', path=path, dist=dist, time=None)
    else:
        # pick two random stations to pre-select in the form
        rand_num = [random.randint(1, NUM_STATIONS) for _ in range(2)]

        return render_template('norikae.html',
                               network=network,
                               selected_stations=rand_num)
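utils.create_pairs and utils.build_graph are not shown in this snippet; a minimal sketch of what they might look like, assuming network maps each station to its neighbouring stations and outtages is a collection of closed stations.

# Hypothetical sketches of the utils helpers used above (assumptions noted
# in the lead-in; not taken from the original project).
def create_pairs(network):
    # flatten the network mapping into (station, neighbour) edge pairs
    return [(station, neighbour)
            for station, neighbours in network.items()
            for neighbour in neighbours]

def build_graph(connect_list, outtages=()):
    # adjacency dict for BFS, skipping any edge that touches a closed station
    graph = {}
    for a, b in connect_list:
        if a in outtages or b in outtages:
            continue
        graph.setdefault(a, set()).add(b)
        graph.setdefault(b, set()).add(a)
    return graph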
Example #3
import numpy as np

from keras.models import Model
from keras.layers import Input, Lambda

import utils

faces_dir = 'att_faces/'

# import training and test data
(X_train, y_train), (X_test, y_test) = utils.get_data(faces_dir)
num_classes = len(np.unique(y_train))

# create siamese neural network
input_shape = X_train.shape[1:]
siamese_nn = utils.create_siamese_nn(input_shape)
input_left = Input(shape=input_shape)
input_right = Input(shape=input_shape)
output_left = siamese_nn(input_left)
output_right = siamese_nn(input_right)
distance = Lambda(utils.euclidean_distance, output_shape=(1,))([output_left, output_right])
model = Model(inputs=[input_left, input_right], outputs=distance)

# train model
training_pairs, training_labels = utils.create_pairs(X_train, y_train, 
                                                     num_classes=num_classes)
model.compile(loss=utils.contrastive_loss, optimizer='adam', 
              metrics=[utils.accuracy])
model.fit([training_pairs[:,0], training_pairs[:,1]], training_labels,
           batch_size=128, epochs=10)

# save model
model.save('siamese_nn.h5')
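utils.euclidean_distance and utils.contrastive_loss are not shown here; one common definition, following the standard Keras siamese-network recipe (an assumption about this project's utils module).

from keras import backend as K

# Likely definitions of the two utils helpers used above (assumption: they
# follow the standard Keras siamese-network recipe).
def euclidean_distance(vects):
    x, y = vects
    sum_square = K.sum(K.square(x - y), axis=1, keepdims=True)
    return K.sqrt(K.maximum(sum_square, K.epsilon()))

def contrastive_loss(y_true, y_pred):
    # margin-based loss: matching pairs (y_true = 1) are pulled together,
    # non-matching pairs are pushed at least `margin` apart
    margin = 1
    square_pred = K.square(y_pred)
    margin_square = K.square(K.maximum(margin - y_pred, 0))
    return K.mean(y_true * square_pred + (1 - y_true) * margin_square)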

Example #4
import numpy as np

from keras.models import Model
from keras.layers import Input, Lambda
from utils import euclidean_distance
from utils import load_train_test, create_pairs, contrastive_loss
# shared_network (the base CNN applied to both inputs) is assumed to be
# defined elsewhere in this project, e.g. next to the utils helpers above.

input_shape = (112, 92, 1)

input_top = Input(shape=input_shape)
input_bottom = Input(shape=input_shape)

sh_network = shared_network(input_shape)
output_top = sh_network(input_top)
output_bottom = sh_network(input_bottom)

distance = Lambda(euclidean_distance,
                  output_shape=(1, ))([output_top, output_bottom])

model = Model(inputs=[input_top, input_bottom], outputs=distance)

X_train, Y_train, X_test, Y_test = load_train_test("faces")

num_classes = len(np.unique(Y_train))
training_pairs, training_labels = create_pairs(X_train, Y_train,
                                               len(np.unique(Y_train)))
test_pairs, test_labels = create_pairs(X_test, Y_test, len(np.unique(Y_test)))

model.compile(loss=contrastive_loss, optimizer='adam')
model.fit([training_pairs[:, 0], training_pairs[:, 1]],
          training_labels,
          batch_size=64,
          epochs=10)
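The test pairs built above are never used in this snippet; a small follow-up sketch that scores them, assuming label 1 marks a matching pair and using an arbitrary 0.5 threshold on the predicted distance.

# Follow-up sketch (not in the original snippet): score the held-out pairs.
# Assumptions: label 1 means "same person" and 0.5 is an arbitrary threshold
# on the predicted Euclidean distance.
distances = model.predict([test_pairs[:, 0], test_pairs[:, 1]])
matches = (distances.ravel() < 0.5).astype(int)
test_accuracy = np.mean(matches == test_labels)
print("pair accuracy on the test set: %.4f" % test_accuracy)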
Example #5
def main(argv=None):
    if argv is None:
        argv = sys.argv
    try:
        try:
            opts, args = getopt.getopt(argv[1:], "shc", ["send", "help"])
        except getopt.error, msg:
            raise Usage(msg)

        # option processing
        send = False
        for option, value in opts:
            if option in ("-s", "--send"):
                send = True
            if option in ("-h", "--help"):
                raise Usage(help_message)

        # Parse configuration
        config = parse_yaml()
        for key in REQRD:
            if key not in config.keys():
                raise Exception(
                    'Required parameter %s not in yaml config file!' % (key,))

        participants = config['PARTICIPANTS']
        couples = config['COUPLES']
        if len(participants) < 2:
            raise Exception('Not enough participants specified.')

        # Mail parsing
        with open('templates/mail.html', 'r') as f:
            mail_html = f.read()

        givers = []
        for person in participants:
            name, email = re.match(r'([^<]*)<([^>]*)>', person).groups()
            name = name.strip()
            partner = None
            for couple in couples:
                names = [n.strip() for n in couple.split(',')]
                if name in names:
                    # is part of this couple
                    for member in names:
                        if name != member:
                            partner = member
            person = Person(name, email, partner)
            givers.append(person)

        recievers = givers[:]
        pairs = create_pairs(givers, recievers)
        if not send:
            print """
                    Test pairings:

                    %s

                    To send out emails with new pairings,
                    call with the --send argument:

                    $ python secret_santa.py --send

            """ % ("\n".join([str(p) for p in pairs]))

        for pair in pairs:

            if send:
                to = "%s <%s>" % (pair.giver.name, pair.giver.email)
                mail = HtmlMail(
                    config['SUBJECT'], config['FROM'], to, config['USERNAME'],
                    config['PASSWORD'])

                mail.send(
                    parse_email(config['TEMPLATE']).format(
                        config['SUBJECT'], pair.giver.name, pair.reciever.name,
                        config['LIMIT'], config['DEATHLINE'])
                )
                print "Emailed %s <%s>" % (pair.giver.name, pair.giver.email)
Example #6
File: main.py Project: Kukant/MFF1
model = get_CNN_model((28, 28, 1))
model.summary()

model_siam = get_Siammese_model(model, (28, 28, 1))
model_siam.summary()

# compile the model with the Adam optimizer and e.g. binary cross-entropy loss,
# or define your own loss function; add the metrics of your choice

plot_model(model_siam)
model_siam.compile(loss="binary_crossentropy", optimizer='adam', metrics=["accuracy"])


x1, x2, y = create_pairs(x_train, y_train, 300000)
x1_test, x2_test, y_pair_test = create_pairs(x_test, y_test, 10000)
ret = model_siam.fit([x1, x2], y, batch_size=128, epochs=1, validation_data=([x1_test, x2_test], y_pair_test))
# I experimented with the batch size, but the results were quite similar;
# the smaller the batch size, the longer training took.

# I also tried more epochs, but only the training accuracy improved while
# the validation accuracy did not, which indicates the network was overfitting.

# save both models
model_siam.save("models/siam.model")
model.save("models/cnn.model")

# the best result:
# 300000/300000 [==============================] - 148s 494us/step - loss: 0.1930 - acc: 0.9265 - val_loss: 0.1496 - val_acc: 0.9413
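create_pairs(x, y, n) is defined elsewhere in this project; a rough sketch of one possible implementation, assuming it draws n image pairs labelled 1 for same class and 0 otherwise (the original most likely balances the two label types).

import numpy as np

def create_pairs(x, y, n):
    # hypothetical sketch: sample n random index pairs and label them
    # 1 if both images share a class, 0 otherwise (unbalanced as written;
    # the original likely balances positives and negatives)
    idx1 = np.random.randint(0, len(x), n)
    idx2 = np.random.randint(0, len(x), n)
    labels = (y[idx1] == y[idx2]).astype('float32')
    return x[idx1], x[idx2], labels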
Example #7
        model = build_model(net=net,
                            input_shape=input_shape,
                            siamese_weights=args.siamese_weights,
                            share=True)
    else:
        model = build_model(net=net,
                            input_shape=input_shape,
                            siamese_weights=args.siamese_weights,
                            share=False)

    if args.convert is None:
        filepath = 'saved/siam_%s_{val_loss:.4f}-{val_dacc:.4f}-{epoch:03d}.h5' % net
        model.compile(loss=[contrastive_loss], optimizer=opt, metrics=[dacc])
        print('train siamese net.')
        digit_indices = [np.where(y_train == i)[0] for i in range(num_classes)]
        tr_pairs, tr_y = create_pairs(x_train, digit_indices, num_classes)
        print(tr_pairs.shape)
        digit_indices = [np.where(y_test == i)[0] for i in range(num_classes)]
        te_pairs, te_y = create_pairs(x_test, digit_indices, num_classes)
        print(te_pairs.shape)
        sample_num = tr_pairs.shape[0]
        del (x_train, x_test)
        train_gen = DataGenerator('train',
                                  tr_pairs,
                                  tr_y,
                                  batch_size=batch_size,
                                  size=32)
        val_data = ([te_pairs[:, 0], te_pairs[:, 1]], te_y)
    else:
        print('convert siamese net to softmax net')
        filepath = 'saved/classify_%s_{val_loss:.4f}-{val_accuracy:.4f}-{epoch:03d}.h5' % net
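create_pairs(x, digit_indices, num_classes) above matches the calling convention of the classic Keras MNIST siamese example; a sketch of that standard implementation, which may differ in detail from this project's version.

import random
import numpy as np

def create_pairs(x, digit_indices, num_classes):
    # standard Keras-style pairing: for each class, pair consecutive samples
    # as positives and pair each sample with one from a random other class
    # as a negative (labels: 1 = same class, 0 = different class)
    pairs, labels = [], []
    n = min(len(digit_indices[d]) for d in range(num_classes)) - 1
    for d in range(num_classes):
        for i in range(n):
            z1, z2 = digit_indices[d][i], digit_indices[d][i + 1]
            pairs.append([x[z1], x[z2]])
            dn = (d + random.randrange(1, num_classes)) % num_classes
            z1, z2 = digit_indices[d][i], digit_indices[dn][i]
            pairs.append([x[z1], x[z2]])
            labels.extend([1, 0])
    return np.array(pairs), np.array(labels)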