Example 1
import pickle

from flask import jsonify, request


def predict():
    # Flask view; the app/route wiring is not shown in this snippet.
    if request.method == 'POST':
        try:
            data = request.get_json()
            # Load the trained classifier; the context manager guarantees
            # the file is closed even if unpickling fails.
            with open('my_classifier.pickle', 'rb') as f:
                classifier = pickle.load(f)
        except ValueError:
            return jsonify("Please enter a mail.")

        return jsonify(classifier.classify(get_features(data["mail"])))
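The `get_features` helper is not shown. A minimal sketch of what it could look like, assuming an NLTK-style classifier that expects a word-presence feature dict (hypothetical, not the original author's implementation):

def get_features(mail_text):
    # Hypothetical: mark each word in the mail as present, the feature
    # format NLTK's NaiveBayesClassifier and friends expect.
    return {word: True for word in mail_text.lower().split()}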
Example 2
import numpy as np

from keras.callbacks import ModelCheckpoint

# word_indexes_filepath, max_caption_len, feature_dim and the helper
# functions are defined elsewhere in the script (not shown here).
embedding_dim = 300

word_indexes = get_word_indexes(word_indexes_filepath)

model = create_model_3(max_caption_len, word_indexes, embedding_dim, feature_dim, embedding_matrix_filepath)

# Save the model weights to disk at the end of every epoch.
checkpointer = ModelCheckpoint(filepath="model_weights.hdf5", verbose=0)

# model.load_weights('model_weights.hdf5')


# Train indefinitely, cycling over the training set, which is stored on
# disk as seven .npy chunks so only one chunk is in memory at a time.
while True:
    for i in range(1, 8):
        captions = 'dataset/frequencies/train_captions_' + str(i) + '.npy'
        features = 'dataset/frequencies/train_features_' + str(i) + '.npy'

        print(captions)
        print(features)

        train_captions = get_captions(captions)
        train_features = get_features(features)

        # Teacher forcing: the input is each caption minus its final token,
        # the target is the same caption shifted one step to the left.
        X_captions = train_captions[:, :-1]
        Y = np.expand_dims(train_captions[:, 1:], -1)
        X = [train_features, X_captions]

        try:
            # nb_epoch is the Keras 1.x argument name (renamed to `epochs` in Keras 2).
            model.fit(X, Y, nb_epoch=1, callbacks=[checkpointer], shuffle=True)
        except KeyboardInterrupt:
            # On Ctrl-C, force one final checkpoint so the interrupted
            # epoch's weights are not lost.
            checkpointer.on_epoch_end(0)
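The slicing above sets up next-token prediction (teacher forcing): the model reads a caption minus its last token and must output the same caption shifted one step left. A tiny numpy illustration with made-up token ids:

import numpy as np

captions = np.array([[1, 7, 9, 2]])      # say: <start>, 'a', 'dog', <end>
X_captions = captions[:, :-1]            # [[1, 7, 9]]    -> decoder input
Y = np.expand_dims(captions[:, 1:], -1)  # shape (1, 3, 1) -> per-step targets
print(X_captions)                        # [[1 7 9]]
print(Y[:, :, 0])                        # [[7 9 2]]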
Example 3
import json

val_features_filepath = 'dataset/original/merged_val.npy'

processed_train_captions_filepath = 'dataset/frequencies/train_captions_7.npy'
processed_train_features_filepath = 'dataset/frequencies/train_features_7.npy'

processed_val_captions_filepath = 'dataset/frequencies/val_captions.npy'
processed_val_features_filepath = 'dataset/frequencies/val_features.npy'

embeddings_filepath = 'embeddings/glove.6B.300d.txt'
embedding_matrix_filepath = 'layers/embedding_matrix.npy'

embedding_dim = 300
max_caption_len = 19

# train_captions_filepath / train_features_filepath are defined earlier
# in the script (not shown in this snippet).
train_captions = get_captions(train_captions_filepath)
train_features = get_features(train_features_filepath)

# This run processes only the tail of the training set (the final chunk,
# matching the *_7.npy output paths above).
train_captions = train_captions[120000:]
train_features = train_features[120000:]

# val_captions = get_captions(val_captions_filepath)
# val_features = get_features(val_features_filepath)

add_sentence_tokens(train_captions)
# add_sentence_tokens(val_captions)

# word_frequencies = calculate_word_frequencies(train_captions, val_captions)
# with open('dataset/frequencies/word_frequencies.json', 'w') as f:
#     json.dump(word_frequencies, f)

with open('dataset/frequencies/word_frequencies.json', 'r') as f:
    word_frequencies = json.load(f)
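`calculate_word_frequencies` (used in the commented-out block above) is not shown. A minimal sketch under the assumption that captions are sequences of tokens (hypothetical helper, not the author's code):

from collections import Counter

def calculate_word_frequencies(*caption_sets):
    # Hypothetical: count how often each token occurs across all caption sets.
    frequencies = Counter()
    for captions in caption_sets:
        for caption in captions:
            frequencies.update(caption)
    return dict(frequencies)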
Example 4
import time

import numpy as np
import pygame


def main(args):

    print("Loading model")
    model = models_dict[args.model_name].build()
    model.load_weights(args.model_weights)

    vc = init_vc(args.vc_input)
    pygame.init()

    pos_corners = calibration(vc)
    
    # Start the diff process
    prev_black = set()
    prev_white = set()
    init = False
    moves_list = []

    game_over = False

    while not game_over:
        frame = np.swapaxes(read_vc(vc),0,1)
        t0 = time.time()
        features, new_image = get_features(frame, pos_corners, height=562)
        t1 = time.time()
        if not init:
            pygame.init()  # no-op if pygame is already initialised
            # Size the intermediate surface from the processed image and
            # open the display window.
            width, height = new_image.shape[:2]
            screen = pygame.display.set_mode((args.screen_res, args.screen_res))
            init = True
        
        classes = model.predict(features).argmax(axis=1)
        
        t2 = time.time()
        
        # Board coordinates from the flattened 19x19 prediction grid;
        # class 2 = black stone, class 1 = white stone. Note the integer
        # division (//): this snippet was originally Python 2.
        black = set((i % 19, i // 19) for i, cl in enumerate(classes) if cl == 2)
        white = set((i % 19, i // 19) for i, cl in enumerate(classes) if cl == 1)

        # get diff
        diff_error = False
        try:
            col,move = diff_move((prev_black,prev_white),(black,white))
        except DiffError:
            diff_error = True
            col, move = None, None

        screen.fill((0,0,0))

        global_surf = pygame.Surface((2*width,2*width))

        # Display camera
        surface = pygame.pixelcopy.make_surface(np.swapaxes(new_image, 0, 1))
        global_surf.blit(surface,(width,width))

        # Display previous
        surface = get_board_surface(540, prev_black, prev_white,clear=False)
        global_surf.blit(surface,(width,0))

        # Display computer vision
        surface = get_board_surface(540, black, white,clear=False,color=(255,0,0) if diff_error else (0,255,0))
        global_surf.blit(surface,(0,0))

        pygame.transform.smoothscale(global_surf, (args.screen_res,args.screen_res), screen)
        pygame.display.flip()

        t3 = time.time()
        print "Times: {:.2f}, {:.2f}, {:.2f}".format(t1-t0,t2-t1,t3-t2)

        recalibrate = False
        events = pygame.event.get()
        for event in events:
            if event.type == pygame.KEYDOWN and event.key==pygame.K_ESCAPE:
                game_over = True
            elif event.type == pygame.KEYDOWN and event.key==pygame.K_c:
                recalibrate = True
            elif event.type == pygame.KEYDOWN and not diff_error:
                moves_list.append((col,move))
                prev_black, prev_white = black, white
        if recalibrate:
            pygame.quit()
            pygame.init()
            pos_corners = calibration(vc)
            init = False
            

    sgf_string = gen_sgf_string(moves_list, args.name_black, args.name_white)
    with open(args.folder+'/'+args.filename,'w') as f:
        f.write(sgf_string)
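`diff_move` and `DiffError` are not shown above. A minimal sketch of the diffing idea, assuming a legal transition adds exactly one stone of one colour (captures and other edge cases ignored; hypothetical implementation, not the author's):

class DiffError(Exception):
    """Raised when two consecutive board states differ by anything
    other than a single added stone."""

def diff_move(prev_boards, new_boards):
    # Return ('b'|'w', (x, y)) for the single stone added between frames.
    (prev_black, prev_white), (black, white) = prev_boards, new_boards
    added_black = black - prev_black
    added_white = white - prev_white
    if len(added_black) == 1 and not added_white:
        return 'b', added_black.pop()
    if len(added_white) == 1 and not added_black:
        return 'w', added_white.pop()
    raise DiffError("not a single-stone difference")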
Example 5
import json

processed_val_captions_filepath = 'dataset/processed/val_captions.npy'
processed_val_features_filepath = 'dataset/processed/val_features.npy'

embeddings_filepath = 'embeddings/glove.6B.300d.txt'
embedding_matrix_filepath = 'layers/embedding_matrix.npy'

vocab_size = 10000
embedding_dim = 300
max_caption_len = 19

# The *_filepath variables below are defined earlier in the script (not
# shown). The training captions must be loaded here: they are needed to
# build the word index further down.
train_captions = get_captions(train_captions_filepath)
# train_features = get_features(train_features_filepath)

val_captions = get_captions(val_captions_filepath)
val_features = get_features(val_features_filepath)


# add_sentence_tokens(train_captions)
add_sentence_tokens(val_captions)


word_indexes = calculate_word_indexes(train_captions, val_captions)
with open('dataset/word_indexes.json', 'w') as f:
    json.dump(word_indexes, f)
train_captions = None  # release the training captions; no longer needed

# with open('dataset/word_indexes.json', 'r') as f:
#     word_indexes = json.load(f)

# embedding_matrix = create_embedding_matrix(word_indexes, embedding_dim, embeddings_filepath)
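`calculate_word_indexes` is also not shown. A plausible minimal sketch, assuming it assigns a unique integer id to every token across both caption sets (hypothetical; the author's version may cap the vocabulary at `vocab_size`):

def calculate_word_indexes(*caption_sets):
    # Hypothetical: map each distinct token to a unique index.
    # Index 0 is left free, e.g. for padding.
    word_indexes = {}
    for captions in caption_sets:
        for caption in captions:
            for word in caption:
                if word not in word_indexes:
                    word_indexes[word] = len(word_indexes) + 1
    return word_indexes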