Example #1
    def post(self):
        """SComparing similairty of faces in two different images."""
        logging.info('Received post message.')

        args = file_upload.parse_args()

        if 'image' not in args['first_image'].content_type:
            logging.error('First file is not an image')
            return api.abort(
                400, 'Expecting only image files. First File is not an image.')
        if 'image' not in args['second_image'].content_type:
            logging.error('Second file is not an image')
            return api.abort(
                400,
                'Expecting only image files. Second file is not an image.')

        filename1 = os.path.join(Configurations().image_upload_folder,
                                 args['first_image'].filename)
        filename2 = os.path.join(Configurations().image_upload_folder,
                                 args['second_image'].filename)

        logging.info('Saving image files.')
        args['first_image'].save(filename1)
        logging.info('Saved {}'.format(filename1))
        args['second_image'].save(filename2)
        logging.info('Saved {}'.format(filename2))

        logging.info('Loading image files.')
        first_image = face_recognition.load_image_file(filename1)
        logging.info('Loaded first image.')
        second_image = face_recognition.load_image_file(filename2)
        logging.info('Loaded second image.')

        logging.info('Finding facial encodings.')
        first_image_encoding = face_recognition.face_encodings(first_image)
        second_image_encoding = face_recognition.face_encodings(second_image)

        if len(first_image_encoding) == 0:
            logging.error('Unable to detect face in the first image')
            api.abort(500, 'Unable to detect face in the first image')

        if len(second_image_encoding) == 0:
            logging.error('Unable to detect face in the second image')
            api.abort(500, 'Unable to detect face in the second image')
        # face_recognition.compare_faces would only give a boolean match at a
        # fixed tolerance; the raw Euclidean distance below is more informative.
        logging.info('Finding distance between faces.')
        face_distance = np.linalg.norm(first_image_encoding[0] -
                                       second_image_encoding[0])
        logging.info('Face distance found as {}'.format(face_distance))

        # Cast to a plain float so the numpy scalar serializes to JSON.
        return {'similarity': float(1 - face_distance)}, 201
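
A minimal client sketch for the endpoint above (the route URL is an assumption; it is not shown in this excerpt). The multipart field names first_image and second_image match the parser arguments used in the handler:

import requests

# Hypothetical URL; the actual route is registered elsewhere in the service.
URL = 'http://localhost:5000/face-similarity'

with open('a.jpg', 'rb') as f1, open('b.jpg', 'rb') as f2:
    files = {
        'first_image': ('a.jpg', f1, 'image/jpeg'),
        'second_image': ('b.jpg', f2, 'image/jpeg'),
    }
    resp = requests.post(URL, files=files)

print(resp.status_code, resp.json())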
Example #2
def store_selection():
    """Pick a store interactively when more than one is configured."""
    configs = Configurations()

    # Default to the first configured store.
    store = configs.first()
    if len(configs) > 1:
        # raw_input is Python 2; use input() on Python 3.
        selection = raw_input('Store ID: ')
        store = int(selection) if selection else store

    config = configs.to(store)
    return store, config
Example #3
File: train.py Project: dhoman01/im2latex
def main(_):
    config = Configurations()
    config.input_file_pattern = FLAGS.input_file_pattern
    g = tf.Graph()
    with g.as_default():
        model = Model("train", config)
        model.build()

        train_op = model.train_op

    # Run training with TF-Slim's supervised training loop (TF 1.x).
    tf.contrib.slim.learning.train(
        train_op,
        config.train_dir,
        log_every_n_steps=FLAGS.log_every_n_steps,
        graph=g,
        global_step=model.global_step,
        number_of_steps=FLAGS.number_of_steps,
        saver=model.saver)
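
The FLAGS referenced above are defined elsewhere in train.py; a minimal sketch of the expected definitions (names mirror the usage above, defaults are placeholders):

import tensorflow as tf

FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string("input_file_pattern", "",
                           "File pattern of the sharded input files.")
tf.app.flags.DEFINE_integer("log_every_n_steps", 10,
                            "Frequency at which loss is logged.")
tf.app.flags.DEFINE_integer("number_of_steps", 1000000,
                            "Number of training steps.")

if __name__ == "__main__":
    tf.app.run()  # parses flags, then invokes main(_)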
Example #4
def main(_):
    # Build the inference graph.
    g = tf.Graph()
    with g.as_default():
        model = inference_wrapper.InferenceWrapper()
        restore_fn = model.build_graph_from_config(Configurations(),
                                                   FLAGS.checkpoint_path)
    g.finalize()

    # Create the vocabulary.
    vocab = vocabulary.Vocabulary(FLAGS.vocab_file)

    filenames = []
    for file_pattern in FLAGS.input_files.split(","):
        filenames.extend(tf.gfile.Glob(file_pattern))
    tf.logging.info("Running caption generation on %d files matching %s",
                    len(filenames), FLAGS.input_files)

    with tf.Session(graph=g) as sess:
        # Load the model from checkpoint.
        restore_fn(sess)

        # Prepare the caption generator. Here we are implicitly using the default
        # beam search parameters. See caption_generator.py for a description of the
        # available beam search parameters.
        generator = caption_generator.CaptionGenerator(model, vocab)

        for filename in filenames:
            with tf.gfile.GFile(filename, "rb") as f:  # binary mode for raw image bytes
                image = f.read()
            captions = generator.beam_search(sess, image)
            print("Captions for image %s:" % os.path.basename(filename))
            for i, caption in enumerate(captions):
                # Ignore begin and end words.
                sentence = [
                    vocab.id_to_word(w) for w in caption.sentence[1:-1]
                ]
                sentence = " ".join(sentence)
                print("  %d) %s (p=%f)" %
                      (i, sentence, math.exp(caption.logprob)))
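
The comment above notes that the default beam search parameters are used implicitly. Assuming the generator mirrors im2txt's caption_generator.py, they can also be set explicitly (parameter names are an assumption based on that file):

generator = caption_generator.CaptionGenerator(
    model,
    vocab,
    beam_size=3,                      # partial captions kept at each step
    max_caption_length=20,            # hard cap on caption length
    length_normalization_factor=0.0)  # > 0 favors longer captions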
Example #5
                invalid_params.append('config_template')

    if len(invalid_params) > 0:
        show_invalid_params(invalid_params, parsed_params)
        show_help(program_name, parsed_params)
        sys.exit(1)

    return parsed_params


def create_logs_path():
    logpath = os.path.join(BASE_DIR, 'logs')
    if not os.path.exists(logpath):
        log.info("Logs directory doesn't exist. Making the directory ...")
        try:
            os.mkdir(logpath)
        except Exception:
            log.error('Error: Failed to create dir {}'.format(logpath))
            log.error('Exception occurred', exc_info=True)
            sys.exit(1)


if __name__ == "__main__":
    create_logs_path()

    parsed_params = parse_cmd_parameters()

    config = Configurations(parsed_params)

    manager = Manager(config)
    manager.start()
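
The excerpt relies on module-level BASE_DIR and log objects defined earlier in the file; a plausible minimal setup (an assumption, not shown in the source):

import logging
import os

# Assumed definitions; the source file provides its own equivalents.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)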
Example #6
            optimizer.apply_gradients(grads_and_vars=zip(
                grads, model.variables))  # update the parameters
            if (batch + 1) % 50 == 0:
                print('[Epoch{} Batch{}] loss:{:.3f}'.format(
                    epoch + 1, batch + 1, loss.numpy()))
        manager.save()  # save a checkpoint after each epoch
        print('Epoch{} Loss: {:.5f}'.format(epoch + 1, np.mean(epoch_loss)))
        print('***************')


if __name__ == '__main__':
    train_X = np.loadtxt('/data/train_X.txt', dtype='int')
    train_Y = np.loadtxt('/data/train_Y.txt', dtype='int')
    test_X = np.loadtxt('/data/test_X.txt', dtype='int')

    index2word, word2index, embedding_matrix = load_vocab_embedding_matrix()

    config = Configurations()

    train_dataset = tf.data.Dataset.from_tensor_slices(
        (train_X, train_Y)).batch(config.batch_size)

    model = Seq2seq(vocab_size=embedding_matrix.shape[0],
                    embedding_dim=embedding_matrix.shape[1],
                    embedding_matrix=embedding_matrix,
                    gru_units=config.hid_dim,
                    dropout_rate=config.dropout)

    training(model, train_dataset, config.epochs, config.learning_rate,
             word2index['<PAD>'])
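
training receives word2index['<PAD>'], presumably so padded time steps can be excluded from the loss; a minimal sketch of such masking (an assumption about what training does internally, not code from this project):

import tensorflow as tf

def masked_loss(y_true, y_pred, pad_id):
    # Per-token cross-entropy, zeroed wherever the target is the PAD id.
    loss = tf.keras.losses.sparse_categorical_crossentropy(
        y_true, y_pred, from_logits=True)
    mask = tf.cast(tf.not_equal(y_true, pad_id), loss.dtype)
    # Average only over the non-PAD tokens.
    return tf.reduce_sum(loss * mask) / tf.maximum(tf.reduce_sum(mask), 1.0)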
Example #7
"""Snake game in python"""
from pathlib import Path

import pygame
from direction import Direction
from game import Game
from configurations import Configurations

pygame.init()
pygame.display.set_caption("PySnake")
ABS_PATH = str(Path("images", "png-snake-icon.png").absolute())  # portable path join
icon = pygame.image.load(ABS_PATH)
pygame.display.set_icon(icon)
clock = pygame.time.Clock()

cf = Configurations()
game = Game(cf)
win = pygame.display.set_mode(
    (cf.board_width + 1, cf.board_width + 1 + cf.cell_size * 2))

# Main game loop
while True:
    pygame.time.delay(260 - game.level * 20)  # shorter delay as the level rises
    clock.tick(40)

    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            game.quit_game()
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_RIGHT:
                game.snake.set_direction(Direction.RIGHT)
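
The remaining arrow-key handlers presumably mirror the RIGHT case; a sketch, assuming the Direction enum also defines LEFT, UP, and DOWN:

            elif event.key == pygame.K_LEFT:
                game.snake.set_direction(Direction.LEFT)
            elif event.key == pygame.K_UP:
                game.snake.set_direction(Direction.UP)
            elif event.key == pygame.K_DOWN:
                game.snake.set_direction(Direction.DOWN)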