Example 1
def main(_):
    print('Correcting error...')
    # Set the model path.
    model_path = cged_config.model_path
    data_reader = CGEDReader(cged_config, cged_config.train_path)

    if cged_config.enable_decode_sentence:
        # Correct user's sentences.
        with tf.Session() as session:
            model = create_model(session, True, model_path, config=cged_config)
            print("Enter a sentence you'd like to correct")
            correct_new_sentence = input()
            while correct_new_sentence.lower() != 'no':
                decode_sentence(session, model=model, data_reader=data_reader,
                                sentence=correct_new_sentence,
                                corrective_tokens=data_reader.read_tokens(cged_config.train_path))
                print("Enter a sentence you'd like to correct or press NO")
                correct_new_sentence = input()
    elif cged_config.enable_test_decode:
        # Decode test sentences.
        with tf.Session() as session:
            model = create_model(session, True, model_path, config=cged_config)
            print("Loaded model. Beginning decoding.")
            decodings = decode(session, model=model, data_reader=data_reader,
                               data_to_decode=data_reader.read_tokens(cged_config.test_path, is_infer=True),
                               corrective_tokens=data_reader.read_tokens(cged_config.train_path))
            # Write the decoded tokens to stdout.
            for tokens in decodings:
                sys.stdout.write(' '.join(tokens) + '\n')
                sys.stdout.flush()
Example 2
def recognize(target):
    model = create_model()

    # Load the weights if they exist; otherwise train the model from scratch.
    if os.path.exists('weights.h5'):
        model.load_weights('weights.h5')
    else:
        train(model)

    if os.path.isfile(target):
        img = image.load_img(target, target_size=(img_width, img_height))
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        prediction = model.predict(x)
        raw_img = matplotlib.image.imread(target)
        plt.imshow(raw_img)
        # predict() returns an array of shape (1, 1); threshold the score.
        if prediction[0][0] > 0.5:
            result = 'butterflies'
            plt.text(0, -20, 'This is an image of a butterfly.', fontsize=20)
        else:
            result = 'flowers'
            plt.text(150, -50, 'This is an image of flowers.', fontsize=20)
        plt.axis("off")
        plt.show()
        return result
    else:
        raise IOError('No such file')
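A minimal usage sketch for the function above; the image path is a hypothetical placeholder, not from the source.

recognize('images/sample.jpg')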
Example 3
def main(args, config):
    model = create_model(config)
    loaded_step = load_parameters(model, checkpoint_path=args.checkpoint)
    model.eval()
    if args.vocoder == "waveflow":
        vocoder = WaveflowVocoder()
        vocoder.model.eval()
    elif args.vocoder == "griffin-lim":
        vocoder = GriffinLimVocoder(
            sharpening_factor=config["sharpening_factor"],
            sample_rate=config["sample_rate"],
            n_fft=config["n_fft"],
            win_length=config["win_length"],
            hop_length=config["hop_length"])
    else:
        raise ValueError("Other vocoders are not supported.")

    if not os.path.exists(args.output):
        os.makedirs(args.output)
    monotonic_layers = [
        int(item.strip()) - 1 for item in args.monotonic_layers.split(',')
    ]
    with open(args.input, 'rt') as f:
        sentences = [line.strip() for line in f.readlines()]
    for i, sentence in enumerate(sentences):
        wav = synthesize(args, config, model, vocoder, sentence,
                         monotonic_layers)
        sf.write(os.path.join(args.output, "sentence{}.wav".format(i)),
                 wav,
                 samplerate=config["sample_rate"])
Example 4
	def __init__(self, sess):

		# Keep track of tokens (not indexes) for legibility.
		self.query_log = []
		self.response_log = []

		self.vocabulary = pickle.load(open(FLAGS.vocab_dir+'vocab.p', 'rb'))
		self.reverse_vocabulary = pickle.load(open(FLAGS.vocab_dir+'rev_vocab.p', 'rb'))

		# Save the session.
		self.sess = sess

		# Load the model and set the batch size to 1.
		self.model = train.create_model(self.sess, forward_only=True, checkpoint_dir=FLAGS.checkpoint_dir)
		self.model.batch_size = 1

		# Get the buckets from train.
		self.buckets = train._buckets

		# Load the CBOW model if one was specified.
		if len(FLAGS.cbow_model) > 0:
			self.cbow_model = fasttext.load_model(FLAGS.cbow_model, label_prefix='__LABEL__')
		else:
			self.cbow_model = None

		# Store the special UNK token ids used by the model.
		self.special_unk_idxes = [modified_utils.CAPS_UNK_ID_1, modified_utils.CAPS_UNK_ID_2, modified_utils.CAPS_UNK_ID_3]
Example 5
def generate_sample(pcnn_module, batch_size, rng_seed=0):
    rng = random.PRNGKey(rng_seed)
    rng, model_rng = random.split(rng)

    # Create a model with dummy parameters and a dummy optimizer
    example_images = jnp.zeros((1, 32, 32, 3))
    model = train.create_model(model_rng, example_images, pcnn_module)
    optimizer = train.create_optimizer(model, 0)

    # Load learned parameters
    _, ema = train.restore_checkpoint(optimizer, model.params)
    model = model.replace(params=ema)

    # Initialize batch of images
    device_count = jax.local_device_count()
    assert not batch_size % device_count, (
        'Sampling batch size must be a multiple of the device count, got '
        'sample_batch_size={}, device_count={}.'.format(
            batch_size, device_count))
    sample_prev = jnp.zeros(
        (device_count, batch_size // device_count, 32, 32, 3))

    # and batch of rng keys
    sample_rng = random.split(rng, device_count)

    # Generate sample using fixed-point iteration
    sample = sample_iteration(sample_rng, model, sample_prev)
    while jnp.any(sample != sample_prev):
        sample_prev, sample = sample, sample_iteration(sample_rng, model,
                                                       sample)
    return jnp.reshape(sample, (batch_size, 32, 32, 3))
Example 6
def _eval():
    if FLAGS.action not in ['valid', 'test']:
        raise ValueError('Unrecognized action: %s' % FLAGS.action)

    eval_files = []
    for f in os.listdir(FLAGS.data_dir):
        if f.endswith('.%s.tfr' % FLAGS.action):
            eval_files.append(os.path.join(FLAGS.data_dir, f))

    fn_queue = tf.train.string_input_producer(string_tensor=eval_files,
                                              num_epochs=1)

    with tf.Session() as sess:
        _, model = create_model(sess, fn_queue)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        corrects = 0
        total = 0
        try:
            step = 0
            while not coord.should_stop():
                loss, probs, labels, weights = sess.run(
                    [model.loss, model.probs, model.label, model.weight])
                corrects += ((labels == probs.argmax(axis=1)) * weights).sum()
                total += weights.sum()
                step += 1
        except tf.errors.OutOfRangeError:
            print('Evaluation done, weighted accuracy %.2f.' %
                  (corrects / total))
        finally:
            coord.request_stop()

        coord.join(threads)
Example 7
	def __init__(self):
		# Load in model from train.py and load in the trained weights
		self.model = create_model(keep_prob=1) # no dropout
		self.model.load_weights('model_weights.h5')

		# Init controller for manual override
		self.real_controller = XboxController()
Example 8
def main():
    model = create_model()
    model.load_weights(WEIGHTS_FILE)

    sorted_files_iterator = sorted(glob.glob(train_IMAGES))

    for filename in sorted_files_iterator:
        unscaled = cv2.imread(filename)

        image = cv2.resize(unscaled, (IMAGE_SIZE, IMAGE_SIZE))

        region, class_id = model.predict(x=np.array([image]))
        region = region[0]

        x0 = int(region[0] * image_width / IMAGE_SIZE)
        y0 = int(region[1] * image_height / IMAGE_SIZE)

        x1 = int((region[0] + region[2]) * image_width / IMAGE_SIZE)
        y1 = int((region[1] + region[3]) * image_height / IMAGE_SIZE)

        class_id = np.argmax(class_id, axis=1)

        label = class_names_LOOKUP[class_id[0]]

        cv2.rectangle(unscaled, (x0, y0), (x1, y1), (0, 0, 255), 1)
        cv2.putText(unscaled, "class: {}".format(label), (x0, y0),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2,
                    cv2.LINE_AA)
        cv2.imshow("image", unscaled)

        cv2.waitKey(1000)
        cv2.destroyAllWindows()
Example 9
 def test_create_model(self):
   """Tests creating model."""
   model = train.create_model(model_cls=models._ResNet1, half_precision=False)  # pylint: disable=protected-access
   params, state = train.initialized(random.PRNGKey(0), 224, model)
   variables = {'params': params, **state}
   x = random.normal(random.PRNGKey(1), (8, 224, 224, 3))
   y = model.apply(variables, x, train=False)
   self.assertEqual(y.shape, (8, 1000))
Example 10
 def test_create_model(self):
   """Tests creating model."""
   model = train.create_model(half_precision=False)
   params, state = train.initialized(random.PRNGKey(0), 224, model)
   variables = {'params': params, **state}
   x = random.normal(random.PRNGKey(1), (8, 224, 224, 3))
   y = model.apply(variables, x, train=False)
   self.assertEqual(y.shape, (8, 1000))
Example 11
def init():
    create_folders()
    print('Loading model...')
    model = create_model(SLICE_SIZE, len(GENRES))    
    model.load(MODEL_FILE_NAME)
    print('Model loaded.')

    return model
Example 12
def load_model():
    global MODEL
    dataset, ids_from_chars, chars_from_ids = train.get_data(
        'discord_data.txt')
    model = train.create_model(ids_from_chars)
    train.restore(
        model, 20,
        os.path.join('./training_checkpoints_discord_2', "ckpt_{epoch}.ckpt"))
    MODEL = train.OneStep(model, chars_from_ids, ids_from_chars)
Example 13
def debug_questions(data_dict, q_num_set):
  w_f_stats = w_freq_stats(data_dict)
  tf.reset_default_graph()
  with tf.Session() as session:
    # with tf.device('/cpu:0'):
    model = train.create_model(session, True, data_dict)
    for qn in q_num_set:
      debug_question(session, model, qn, 'dev', w_f_stats)
      print(u'=' * 80)
Example 14
def load_model(training_dir, device):
    with open(os.path.join(training_dir, 'parameters.json'), 'r') as f:
        jf = json.load(f)
        seg_model = jf['SEGMENTATION_MODEL']
        num_classes = int(jf['NUM_CLASSES'])
    model = train.create_model(num_classes=num_classes, model_name=seg_model)
    model.load_state_dict(torch.load(os.path.join(training_dir, 'model.pth')))
    model.to(device)
    return model.eval(), num_classes
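A hypothetical call site for the loader above; the training-directory path is a placeholder, not from the source.

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model, num_classes = load_model('runs/segmentation_run1', device)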
Example 15
def load_checkpoint(filepath):
    checkpoint = torch.load(filepath)
    model = create_model(architecture=checkpoint['architecture'],
                         hidden_size=checkpoint['hidden_size'], 
                         p_dropout=checkpoint['p_dropout'])
    model.load_state_dict(checkpoint['state_dict'])
    model.class_to_idx = checkpoint['class_to_idx']
    model.idx_to_class = {idx: cls for cls, idx in model.class_to_idx.items()}
    return model
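For reference, a minimal sketch of the saving side this loader implies: the checkpoint dict must carry exactly the keys load_checkpoint() reads. The helper below is an assumption, not shown in the source.

import torch

def save_checkpoint(model, filepath, architecture, hidden_size, p_dropout):
    # Persist everything load_checkpoint() above expects to find.
    checkpoint = {
        'architecture': architecture,
        'hidden_size': hidden_size,
        'p_dropout': p_dropout,
        'state_dict': model.state_dict(),
        'class_to_idx': model.class_to_idx,
    }
    torch.save(checkpoint, filepath)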
Example 16
def optimize(df: pd.DataFrame):
    X, Y = df.iloc[:, :INPUT_SIZE], df.iloc[:, INPUT_SIZE:]

    for num_layers in NUM_LAYERS:
        for num_units in NUM_UNITS:
            name = '{}-{}'.format(num_layers, num_units)
            print(name)
            tb = TensorBoard(log_dir='logs/{}'.format(name))
            model = create_model(num_layers, num_units)
            model.fit(X, Y, epochs=20, validation_split=0.1, callbacks=[tb])
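NUM_LAYERS and NUM_UNITS are not defined in this excerpt; a plausible grid for the sweep above (purely an assumption) would be:

# Hypothetical search grid; the source does not show these values.
NUM_LAYERS = [1, 2, 3]
NUM_UNITS = [32, 64, 128]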
Example 17
 def test_create_model(self):
     model, state = train.create_model(random.PRNGKey(0), 8, 224,
                                       jnp.float32)
     x = random.normal(random.PRNGKey(1), (8, 224, 224, 3))
     with nn.stateful(state) as new_state:
         y = model(x)
     state = jax.tree_map(onp.shape, state.as_dict())
     new_state = jax.tree_map(onp.shape, new_state.as_dict())
     self.assertEqual(state, new_state)
     self.assertEqual(y.shape, (8, 1000))
Example 18
    def test_train_one_step(self):
        batch = train.get_batch(128)
        rng = random.PRNGKey(0)

        model = train.create_model(rng)
        optimizer = train.create_optimizer(model, 0.003)
        optimizer, train_metrics = train.train_step(optimizer, batch)

        self.assertLessEqual(train_metrics['loss'], 5)
        self.assertGreaterEqual(train_metrics['accuracy'], 0)
Example 19
def main():
    model = create_model(IMAGE_SIZE, ALPHA)
    model.load_weights(WEIGHTS_FILE)

    ious = []

    lbl = pd.read_csv('with_labels.csv')
    # lbl2 = pd.read_csv('custom_labeled.csv')
    # lbl=pd.concat([lbl1, lbl2[1:]], ignore_index=True)
    labels = lbl[:VALIDATION_DATASET_SIZE]
    # labels=lbl[1000:]

    for image_name, x1, y1, x2, y2 in labels.values:

        image_path = os.path.join(IMAGE_DIR, image_name)
        image_ = cv2.imread(image_path)
        height, width = image_.shape[:2]

        xmin = int(x1 * IMAGE_SIZE)
        ymin = int(y1 * IMAGE_SIZE)
        xmax = int(x2 * IMAGE_SIZE)
        ymax = int(y2 * IMAGE_SIZE)

        box1 = [xmin, ymin, xmax, ymax]

        image = cv2.resize(image_, (IMAGE_SIZE, IMAGE_SIZE))
        region = model.predict(x=np.array([image]))[0]
        xmin, ymin, xmax, ymax = region

        box2 = [xmin, ymin, xmax, ymax]
        iou_ = iou(box1, box2)
        ious.append(iou_)
        if DEBUG:
            if iou_ < 0.7:
                print("IoU for {} is {}".format(image_name, iou_))

                cv2.rectangle(image_, (int(xmin / IMAGE_SIZE * width),
                                       int(ymin / IMAGE_SIZE * height)),
                              (int(xmax / IMAGE_SIZE * width),
                               int(ymax / IMAGE_SIZE * height)), (0, 0, 255),
                              1)
                cv2.rectangle(image_, (int(box1[0] / IMAGE_SIZE * width),
                                       int(box1[1] / IMAGE_SIZE * height)),
                              (int(box1[2] / IMAGE_SIZE * width),
                               int(box1[3] / IMAGE_SIZE * height)),
                              (0, 255, 0), 1)

                cv2.imshow("image", image_)
                cv2.waitKey(0)
                cv2.destroyAllWindows()

    np.set_printoptions(suppress=True)
    print("\nAvg IoU: {}".format(np.mean(ious)))
    print("Highest IoU: {}".format(np.max(ious)))
    print("Lowest IoU: {}".format(np.min(ious)))
Example 20
def main(args):

    task_name = args.task_name.lower()
    processor = reader.MatchProcessor(data_dir=args.data_dir,
                                      task_name=task_name,
                                      vocab_path=args.vocab_path,
                                      max_seq_len=args.max_seq_len,
                                      do_lower_case=args.do_lower_case)

    num_labels = len(processor.get_labels())
    infer_data_generator = processor.data_generator(batch_size=args.batch_size,
                                                    phase='test',
                                                    epoch=1,
                                                    shuffle=False)
    num_test_examples = processor.get_num_examples(phase='test')
    main_program = fluid.default_main_program()

    feed_order, loss, probs, accuracy, num_seqs = create_model(
        args, num_labels=num_labels, is_prediction=True)

    if args.use_cuda:
        place = fluid.CUDAPlace(0)
        dev_count = fluid.core.get_cuda_device_count()
    else:
        place = fluid.CPUPlace()
        dev_count = int(os.environ.get('CPU_NUM', multiprocessing.cpu_count()))

    exe = fluid.Executor(place)
    exe.run(framework.default_startup_program())

    if args.init_checkpoint:
        init_pretraining_params(exe, args.init_checkpoint, main_program)

    feed_list = [
        main_program.global_block().var(var_name) for var_name in feed_order
    ]
    feeder = fluid.DataFeeder(feed_list, place)

    out_scores = open(args.output, 'w')
    for batch_id, data in enumerate(infer_data_generator()):
        results = exe.run(fetch_list=[probs],
                          feed=feeder.feed(data),
                          return_numpy=True)
        for elem in results[0]:
            out_scores.write(str(elem[1]) + '\n')

    out_scores.close()
    if args.save_inference_model_path:
        model_path = args.save_inference_model_path
        # save_inference_model expects a list of target variables.
        fluid.io.save_inference_model(model_path,
                                      feed_order,
                                      [probs],
                                      exe,
                                      main_program=main_program)
Example 21
def main():
    config = configparser.ConfigParser()
    config.read('config.ini', 'UTF-8')
    dataset_type = config.get('dataset', 'type')
    logger.info('loading {}'.format(dataset_type))
    if dataset_type == 'mpii':
        _, test_set = get_mpii_dataset(
            insize=parse_size(config.get('model_param', 'insize')),
            image_root=config.get(dataset_type, 'images'),
            annotations=config.get(dataset_type, 'annotations'),
            train_size=config.getfloat(dataset_type, 'train_size'),
            min_num_keypoints=config.getint(dataset_type, 'min_num_keypoints'),
            seed=config.getint('training_param', 'seed'),
        )
    elif dataset_type == 'coco':
        # The original images have already been resized to a fixed size.
        test_set = get_coco_dataset(
            insize=parse_size(config.get('model_param', 'insize')),
            image_root=config.get(dataset_type, 'val_images'),
            annotations=config.get(dataset_type, 'val_annotations'),
            min_num_keypoints=config.getint(dataset_type, 'min_num_keypoints'),
        )
    else:
        raise Exception('Unknown dataset {}'.format(dataset_type))

    model = create_model(config)

    ## Collect gt keypoints, predicted keypoints and gt bboxes for computing mAP.
    mAP = [[], [], []]
    # Evaluate on several images.
    for i in range(30):
        idx = random.choice(range(len(test_set)))
        image = test_set.get_example(idx)['image']
        gt_kps = test_set.get_example(idx)['keypoints']
        # bbox is the person box for coco, the head box for mpii.
        gt_bboxs = test_set.get_example(idx)['bbox']
        humans = estimate(model, image.astype(np.float32))
        mAP[0].append(gt_kps)
        mAP[1].append(humans)
        mAP[2].append(gt_bboxs)
        pil_image = Image.fromarray(image.transpose(1, 2, 0).astype(np.uint8))
        pil_image = draw_humans(
            keypoint_names=model.keypoint_names,
            edges=model.edges,
            pil_image=pil_image,
            humans=humans
        )

        pil_image.save('results/result{}.png'.format(i), 'PNG')

    gene_json(mAP)
Example 22
    def test_train_one_step(self):
        batch = train.get_batch(128)
        rng = random.PRNGKey(0)

        with nn.stochastic(rng):
            model = train.create_model(nn.make_rng())
            optimizer = train.create_optimizer(model, 0.003)
            optimizer, train_metrics = train.train_step(
                optimizer, batch, nn.make_rng())

        self.assertLessEqual(train_metrics['loss'], 5)
        self.assertGreaterEqual(train_metrics['accuracy'], 0)
Example 23
 def test_train_one_epoch(self):
     train_ds, test_ds = train.get_datasets()
     input_rng = onp.random.RandomState(0)
     model = train.create_model(random.PRNGKey(0))
     optimizer = train.create_optimizer(model, 0.1, 0.9)
     optimizer, train_metrics = train.train_epoch(optimizer, train_ds, 128,
                                                  0, input_rng)
     self.assertLessEqual(train_metrics['loss'], 0.27)
     self.assertGreaterEqual(train_metrics['accuracy'], 0.92)
     loss, accuracy = train.eval_model(optimizer.target, test_ds)
     self.assertLessEqual(loss, 0.06)
     self.assertGreaterEqual(accuracy, 0.98)
Example 24
def main():
    model = create_model(train=False)

    images = np.load(IMAGES_FILE_NAME)
    # Warm-up call so graph building is excluded from the timing below.
    model(tf.convert_to_tensor(np.array([images[0]]), dtype=float))

    start = time.perf_counter()

    for i in range(25):
        model(tf.convert_to_tensor(np.array([images[i]]), dtype=float))

    end = time.perf_counter()
    print("elapsed", end - start)
Example 25
def main():
    argparser = argparse.ArgumentParser(description='Test the Classifier')
    argparser.add_argument('data_directory',
                           help='The directory of the image to classify.')
    argparser.add_argument('checkpoint',
                           help='The directory for loading checkpoints.')
    argparser.add_argument('--top_k',
                           type=int,
                           default=5,
                           help='Return the top K most likely classes.')
    argparser.add_argument('--category_names',
                           default='cat_to_name.json',
                           help='Mapping of categories to real names.')
    argparser.add_argument('--gpu', action='store_true', help='Enable GPU')
    args = argparser.parse_args()
    checkpoint_file = args.checkpoint
    image_path = args.data_directory
    topk = args.top_k
    category_names = args.category_names
    gpu_enabled = args.gpu
    with open(category_names, 'r') as f:
        cat_to_name = json.load(f)
    checkpoint = torch.load(checkpoint_file)
    device = 'cpu'
    if gpu_enabled:
        if torch.cuda.is_available():
            device = 'cuda'
        else:
            print("gpu is not available")
    if checkpoint['device'] != device:
        print(
            "Error! The checkpoint was trained on {}, but the current device is {}."
            .format(checkpoint['device'], device))
    model, classifier_name, hidden_layers = create_model(
        checkpoint['model_name'], hidden_layers=checkpoint['hidden_layers'])
    model.to(device)
    learning_rate = checkpoint['learning_rate']
    model_classifier_state_dict = checkpoint['model_classifier_state_dict']
    if classifier_name == 'classifier':
        model.classifier.load_state_dict(model_classifier_state_dict)
        optimizer = optim.Adam(model.classifier.parameters(), lr=learning_rate)
    elif classifier_name == 'fc':
        model.fc.load_state_dict(model_classifier_state_dict)
        optimizer = optim.Adam(model.fc.parameters(), lr=learning_rate)
        #optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
    model.class_to_idx = checkpoint['class_to_idx']
    probs, classes = predict(image_path, model, topk, device)
    for i in range(topk):
        print("flower name: {}, probability: {:.2f}%.".format(
            cat_to_name[classes[i]], probs[i] * 100))
Example 26
def main(args):

    task_name = args.task_name.lower()
    processors = {
        'match': reader.MatchProcessor,
    }

    processor = processors[task_name](data_dir=args.data_dir,
                                      vocab_path=args.vocab_path,
                                      max_seq_len=args.max_seq_len,
                                      do_lower_case=args.do_lower_case)
    num_labels = len(processor.get_labels())
    infer_data_generator = processor.data_generator(
        batch_size=args.batch_size,
        phase='dev',
        epoch=args.epoch,
        shuffle=False)

    main_program = fluid.default_main_program()
    feed_order, loss, probs, accuracy, num_seqs = create_model(
                args,
                num_labels=num_labels)

    if args.use_cuda: 
        place = fluid.CUDAPlace(0)
    else:
        place = fluid.CPUPlace()

    exe = fluid.Executor(place)
    exe.run(framework.default_startup_program())

    if args.init_checkpoint: 
        init_pretraining_params(exe, args.init_checkpoint, main_program)

    feed_list = [
        main_program.global_block().var(var_name) for var_name in feed_order
        ]
    feeder = fluid.DataFeeder(feed_list, place)

    label_list = []
    for batch_id, data in enumerate(infer_data_generator()): 
        results = exe.run(
                fetch_list=[probs],
                feed=feeder.feed(data),
                return_numpy=True)
        for elem in results[0]: 
            label_list.append(str(elem[1]))

    return label_list
Example 27
    def load(self, checkpoint_path, model_name='tacotron'):
        print('Constructing model: %s' % model_name)
        inputs = tf.placeholder(tf.int32, [1, None], 'inputs')
        input_lengths = tf.placeholder(tf.int32, [1], 'input_lengths')
        with tf.variable_scope('model') as scope:
            self.model = create_model(model_name, hparams)
            self.model.initialize(inputs, input_lengths)
            self.wav_output = audio.inv_spectrogram_tensorflow(
                self.model.linear_outputs[0])

        print('Loading checkpoint: %s' % checkpoint_path)
        self.session = tf.Session()
        self.session.run(tf.global_variables_initializer())
        saver = tf.train.Saver()
        saver.restore(self.session, checkpoint_path)
Example 28
def decode(enc_path, dec_path):

    # Only allocate part of the gpu memory when predicting.
    # gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.2)
    # config = tf.ConfigProto(gpu_options=gpu_options)

    with tf.Session() as sess:
        # Create model and load parameters.
        model = create_model(sess, True)
        model.batch_size = 1  # We decode one sentence at a time.

        enc_vocab, _ = initialize_vocabulary(enc_path)
        _, rev_dec_vocab = initialize_vocabulary(dec_path)

        # Decode from standard input.
        sys.stdout.write("> ")
        sys.stdout.flush()
        sentence = sys.stdin.readline()
        while sentence:
            # Get token-ids for the input sentence.
            token_ids = sentence_to_token_ids(tf.compat.as_bytes(sentence),
                                              enc_vocab)
            # Which bucket does it belong to?
            bucket_id = min([
                b for b in range(len(_buckets))
                if _buckets[b][0] > len(token_ids)
            ])
            # Get a 1-element batch to feed the sentence to the model.
            encoder_inputs, decoder_inputs, target_weights = model.get_batch(
                {bucket_id: [(token_ids, [])]}, bucket_id)
            # Get output logits for the sentence.
            _, _, output_logits = model.step(sess, encoder_inputs,
                                             decoder_inputs, target_weights,
                                             bucket_id, True)
            # This is a greedy decoder - outputs are just argmaxes of output_logits.
            outputs = [
                int(np.argmax(logit, axis=1)) for logit in output_logits
            ]
            # If there is an EOS symbol in outputs, cut them at that point.
            if EOS_ID in outputs:
                outputs = outputs[:outputs.index(EOS_ID)]
            # Print out French sentence corresponding to outputs.
            print(" ".join([
                tf.compat.as_str(rev_dec_vocab[output]) for output in outputs
            ]))
            print("> ", end="")
            sys.stdout.flush()
            sentence = sys.stdin.readline()
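The _buckets list is not defined in this excerpt. Assuming the code derives from the classic TensorFlow translate tutorial, it would look like the sketch below; note that the min() above raises ValueError for inputs longer than the largest bucket, so such sentences would need truncation.

# Bucket sizes as in the classic translate tutorial (an assumption here;
# the excerpt does not show _buckets).
_buckets = [(5, 10), (10, 15), (20, 25), (40, 50)]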
Example 29
def main():
    """
    Main function.
    Does the following step by step:
    * Load images (from which to extract cat faces) from SOURCE_DIR
    * Initialize model (as trained via train_cat_face_locator.py)
    * Loads and prepares images for the model.
    * Uses trained model to predict locations of cat faces.
    * Projects face coordinates onto original images
    * Marks faces in original images.
    * Saves each marked image.
    """
    parser = argparse.ArgumentParser(description="Apply a trained cat face locator " \
                                                  "model to images.")
    parser.add_argument("--dataset", required=True, help="Path to the images directory.")
    parser.add_argument("--weights", required=False, default=SAVE_WEIGHTS_CHECKPOINT_FILEPATH,
                        help="Filepath to the weights of the model.")
    parser.add_argument("--output", required=False, default="apply_model_output",
                        help="Filepath to the directory in which to save the output.")
    args = parser.parse_args()

    # load images
    filepaths = get_image_filepaths([args.dataset])
    filenames = [os.path.basename(fp) for fp in filepaths] # will be used during saving
    nb_images = len(filepaths)
    X = np.zeros((nb_images, MODEL_IMAGE_HEIGHT, MODEL_IMAGE_WIDTH, 3), dtype=np.float32)
    for i, fp in enumerate(filepaths):
        image = ndimage.imread(fp, mode="RGB")
        image = misc.imresize(image, (MODEL_IMAGE_HEIGHT, MODEL_IMAGE_WIDTH))
        X[i] = image / 255.0
    X = np.rollaxis(X, 3, 1)

    # assure that dataset is not empty
    assert X.shape[0] > 0, X.shape

    # create model
    model = create_model(MODEL_IMAGE_HEIGHT, MODEL_IMAGE_WIDTH, "mse", Adam())
    model.load_weights(args.weights)

    # predict positions of faces
    preds = model.predict(X, batch_size=BATCH_SIZE)

    # Draw the predicted rectangles and save to the output directory.
    print("Saving images...")
    for idx, (y, x, half_height, half_width) in enumerate(preds):
        img = draw_predicted_rectangle(X[idx], y, x, half_height, half_width)
        filepath = os.path.join(args.output, filenames[idx])
        misc.imsave(filepath, img)
Example 30
def main():
    model = create_model()
    images = np.load(IMAGES_FILE_NAME)
    # Warm-up call so the first client request is not slowed by model build.
    model(tf.convert_to_tensor(np.array([images[0]]), dtype=float))

    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.bind((TCP_IP, TCP_PORT))

    sock.listen(1)

    conn, address = sock.accept()

    connectionHandler = ConnectionHandler(conn, address, model)
    connectionHandler.run()

    sock.close()
Example 31
def freq_trained_words(data_dict):
  tf.reset_default_graph()
  with tf.Session() as session:
    # with tf.device('/cpu:0'):
    model = train.create_model(session, True, data_dict)
    A = session.run([model.A])[0]

  w_d_freq, w_val_freq, w_q_freq, w_ans_freq = w_freq_stats(data_dict)
  vocab = data_dict['vocab']
  results = sorted([(np.linalg.norm(A[i]), w_d_freq[i], w_val_freq[i],
                     w_q_freq[i], w_ans_freq[i], k)
                    for k, i in vocab.items()
                    if (w_ans_freq[i] + w_q_freq[i] > 5 or
                        w_val_freq[i] + w_d_freq[i] > 50)],
                   reverse=False)
  return list(zip(*results))[-1][1:]
Example 32
def main():
    """Main function, estimates the performance of a model on the training,
    validation und test datasets."""
    # handle arguments from command line
    parser = argparse.ArgumentParser()
    parser.add_argument("identifier", help="Identifier of the experiment of " \
                                           "which to load the weights.")
    parser.add_argument("--images", required=True,
                        help="Filepath to the 'faces/' subdirectory in the " \
                             "'Labeled Faces in the Wild grayscaled and " \
                             "cropped' dataset.")
    args = parser.parse_args()
    validate_identifier(args.identifier, must_exist=True)

    if not os.path.isdir(args.images):
        raise Exception("The provided filepath to the dataset seems to not " \
                        "exist.")

    # Load:
    #  1. Validation set,
    #  2. Training set,
    #  3. Test set
    # We will test on each one of them.
    # Results from training and validation set are already known, but will be
    # shown in more detail here.
    # Additionally, we need to load train and val datasets to make sure that
    # no image contained in them is contained in the test set.
    print("Loading validation set...")
    pairs_val = get_image_pairs(args.images, VALIDATION_COUNT_EXAMPLES,
                                pairs_of_same_imgs=False, ignore_order=True,
                                exclude_images=list(), seed=SEED,
                                verbose=False)
    assert len(pairs_val) == VALIDATION_COUNT_EXAMPLES
    X_val, y_val = image_pairs_to_xy(pairs_val, height=INPUT_HEIGHT,
                                     width=INPUT_WIDTH)

    print("Loading training set...")
    pairs_train = get_image_pairs(args.images, TRAIN_COUNT_EXAMPLES,
                                  pairs_of_same_imgs=False, ignore_order=True,
                                  exclude_images=pairs_val, seed=SEED,
                                  verbose=False)
    assert len(pairs_train) == TRAIN_COUNT_EXAMPLES
    X_train, y_train = image_pairs_to_xy(pairs_train, height=INPUT_HEIGHT,
                                         width=INPUT_WIDTH)

    print("Loading test set...")
    pairs_test = get_image_pairs(args.images, TEST_COUNT_EXAMPLES,
                                 pairs_of_same_imgs=False, ignore_order=True,
                                 exclude_images=pairs_val+pairs_train,
                                 seed=SEED, verbose=False)
    assert len(pairs_test) == TEST_COUNT_EXAMPLES
    X_test, y_test = image_pairs_to_xy(pairs_test, height=INPUT_HEIGHT,
                                       width=INPUT_WIDTH)
    print("")

    # Plot dataset skew
    print("Plotting dataset skew. (Only for pairs of images showing the " \
          "same person.)")
    print("More unequal bars mean that the dataset is more skewed (towards " \
          "very few people).")
    print("Close the chart to continue.")
    plot_dataset_skew(
        pairs_train, pairs_val, pairs_test,
        only_y_same=True,
        show_plot_windows=SHOW_PLOT_WINDOWS
    )

    print("Creating model...")
    model, _ = create_model()
    (success, last_epoch) = load_weights(model, SAVE_WEIGHTS_DIR,
                                         args.identifier)
    if not success:
        raise Exception("Could not successfully load model weights")
    print("Loaded model weights of epoch '%s'" % (str(last_epoch)))

    # If we just do one run over a set (training/val/test) we will not augment
    # the images (ia_noop). If we do multiple runs, we will augment images in
    # each run (ia).
    ia_noop = ImageAugmenter(INPUT_WIDTH, INPUT_HEIGHT)
    ia = ImageAugmenter(INPUT_WIDTH, INPUT_HEIGHT,
                        hflip=True, vflip=False,
                        scale_to_percent=1.1,
                        scale_axis_equally=False,
                        rotation_deg=20,
                        shear_deg=6,
                        translation_x_px=4,
                        translation_y_px=4)

    # ---------------
    # Run the tests on the train/val/test sets.
    # We will do a standard testing with one run over each set.
    # We will also do a non-standard test for val and test set where we do
    # multiple runs over the same images and augment them each time. Then
    # we will average the predictions for each pair over the runs to come
    # to a final conclusion.
    # Using augmentation seems to improve the results very slightly
    # (<1% difference).
    # ---------------

    # Only 1 run for the training set, as 10 or more runs would take quite long;
    # when tested, 10 runs seemed to improve the accuracy by only a tiny amount.
    print("-------------")
    print("Training set results (averaged over 1 run)")
    print("-------------")
    evaluate_model(model, X_train, y_train, ia_noop, 1)
    print("")

    print("-------------")
    print("Validation set results (averaged over 1 run)")
    print("-------------")
    evaluate_model(model, X_val, y_val, ia_noop, 1)
    print("")

    print("-------------")
    print("Validation set results (averaged over 50 runs)")
    print("-------------")
    evaluate_model(model, X_val, y_val, ia, 50)

    if TEST_COUNT_EXAMPLES > 0:
        print("-------------")
        print("Test set results (averaged over 1 run)")
        print("-------------")
        evaluate_model(model, X_test, y_test, ia_noop, 1)
        print("")

        print("-------------")
        print("Test set results (averaged over 50 runs)")
        print("-------------")
        evaluate_model(model, X_test, y_test, ia, 25)

    print("Finished.")