Example #1
    def __call__(self, feature_map, training=True, keep_prob=1.0):
        weight_decay = FLAGS.weight_decay
        activation_fn = tf.nn.relu

        end_points = {}
        with slim.arg_scope(
                inception.inception_v1_arg_scope(
                    weight_decay=FLAGS.weight_decay)):
            with tf.variable_scope("", reuse=self.reuse):
                with tf.variable_scope(None, 'KeyValueModule',
                                       [feature_map]) as scope:
                    with slim.arg_scope([slim.batch_norm, slim.dropout],
                                        is_training=training):
                        key = slim.conv2d(feature_map,
                                          256, [1, 1],
                                          scope='KeyRep')
                        key = tf.reshape(key, [
                            key.get_shape().as_list()[0], -1,
                            key.get_shape().as_list()[3]
                        ])

                        value = slim.conv2d(feature_map,
                                            1024, [1, 1],
                                            scope='ValueRep')
                        value = tf.reshape(value, [
                            value.get_shape().as_list()[0], -1,
                            value.get_shape().as_list()[3]
                        ])

                        self.reuse = True
        return key, value
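Note: the two 1x1 convolutions above only reshape the backbone features into a flat key matrix ([batch, H*W, 256]) and value matrix ([batch, H*W, 1024]). A minimal sketch of how such a pair is typically consumed with dot-product attention; the `query` tensor and the `attend` helper are assumptions for illustration, not part of the original project:

def attend(query, key, value):
    # query: [batch, num_queries, 256]; key: [batch, H*W, 256]; value: [batch, H*W, 1024]
    scores = tf.matmul(query, key, transpose_b=True)  # [batch, num_queries, H*W]
    weights = tf.nn.softmax(scores)                   # attention over spatial positions
    return tf.matmul(weights, value)                  # [batch, num_queries, 1024]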
Example #2
    def build(self, dataset, image_height, image_width, num_classes, is_training):

        images, labels = self._load_batch(dataset, height=image_height, width=image_width)

        # Create the model, use the default arg scope to configure the batch norm parameters.
        with slim.arg_scope(inception_v1_arg_scope()):
            logits, end_points = inception_v1(images, num_classes=num_classes, is_training=is_training)

        # Specify the loss function:
        one_hot_labels = slim.one_hot_encoding(labels, num_classes)
        loss = slim.losses.softmax_cross_entropy(logits, one_hot_labels)

        # Create some summaries to visualize the training process:
        tf.summary.scalar('losses/Total_Loss', loss)

        if is_training:
            # Specify the optimizer and create the train op:
            optimizer = tf.train.RMSPropOptimizer(learning_rate=0.01)
            variables_to_train = []
            for scope in self.trainable_scopes:
                variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)
                variables_to_train.extend(variables)

            exec_op = slim.learning.create_train_op(
                loss, optimizer, variables_to_train=variables_to_train)

        else:
            exec_op = end_points['Predictions']

        if self.checkpoints_file is not None:
            self.init_fn = self._get_init_fn()

        return exec_op, tf.get_default_graph()
Example #3
 def __set_up_inception(self):
     self.num_clss = self.config["Model"]["InceptionV1"]["num_clss"]
     self.map_path = self.config["Model"]["InceptionV1"]["map_path"]
     self.X = tf.placeholder(tf.float32,
                             shape=[
                                 None, self.config["image"]["height"],
                                 self.config["image"]["width"],
                                 self.config["image"]["num_channels"]
                             ])
     with slim.arg_scope(inception.inception_v1_arg_scope()):
         self.logits, self.end_points = inception.inception_v1(
             self.X,
             num_classes=self.num_clss,
             is_training=False,
             m_w=self.config["mantissa_width"],
             e_w=self.config["exponent_width"],
             s_w=self.config["shared_exponent_width"],
             alter=self.config["quantize"],
             debug=self.config["debug_layer"])
     self.predictions = self.end_points["Predictions"]
     self.variables_to_restore = slim.get_variables_to_restore()
     self.init_assign_fn = slim.assign_from_checkpoint_fn(
         self.config["to_ckpt"] +
         self.config["Model"]["InceptionV1"]["ckpt_path"],
         self.variables_to_restore)
Example #4
 def testModelHasExpectedNumberOfParameters(self):
     batch_size = 5
     height, width = 224, 224
     inputs = tf.random_uniform((batch_size, height, width, 3))
     with slim.arg_scope(inception.inception_v1_arg_scope()):
         inception.inception_v1_base(inputs)
     total_params, _ = slim.model_analyzer.analyze_vars(slim.get_model_variables())
     self.assertAlmostEqual(5607184, total_params)
Example #5
  def testNoBatchNormScaleByDefault(self):
    height, width = 224, 224
    num_classes = 1000
    inputs = tf.placeholder(tf.float32, (1, height, width, 3))
    with slim.arg_scope(inception.inception_v1_arg_scope()):
      inception.inception_v1(inputs, num_classes, is_training=False)

    self.assertEqual(tf.global_variables('.*/BatchNorm/gamma:0$'), [])
Example #7
def evaluate_inceptionV1_bin(batch_size):
    with tf.Graph().as_default():
        tf.logging.set_verbosity(tf.logging.INFO)
        global_step = slim.get_or_create_global_step()

        # Summary


        dataset = plantVSnoplant.get_split('validation', plantVSnoplant_data_dir)
        images, labels = load_batch(dataset, batch_size=batch_size, k=0, r=[], is_training=False)

        with slim.arg_scope(inception.inception_v1_arg_scope(weight_decay=weight_decay)):
            logits, _ = inception.inception_v1(images, num_classes=2, is_training=False)


        total_output = []
        total_labels = []
        total_images = []

        with tf.Session() as sess:
            coord = tf.train.Coordinator()
            saver = tf.train.Saver()
            saver.restore(sess, tf.train.latest_checkpoint(train_inceptionV1_bin_dir))
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)
            for i in range(batch_size):
                print('step: %d/%d' % (i, batch_size))
                o, l, image = sess.run([logits, labels, images[0]])
                o = tf.reduce_sum(o, 0) / 5.0
                total_output.append(o)
                total_labels.append(l[0])
                total_images.append(image)
            coord.request_stop()
            coord.join(threads)


            total_output = tf.stack(total_output, 0)
            total_output = tf.nn.softmax(total_output)
            labels = tf.constant(total_labels)
            total_images = sess.run(tf.stack(total_images, 0))

            top1_op = tf.nn.in_top_k(total_output, labels, 1)
            top1_acc = sess.run(tf.reduce_mean(tf.cast(top1_op, tf.float32)))
            print(top1_acc)


            top5_op = tf.nn.in_top_k(total_output, labels, 5)  # NOTE: with only 2 classes, top-5 is trivially 1.0
            top5_acc = sess.run(tf.reduce_mean(tf.cast(top5_op, tf.float32)))
            print(top5_acc)

            accuracy1_sum = tf.summary.scalar('top1_accuracy', top1_acc)
            accuracy5_sum = tf.summary.scalar('top5_accuracy', top5_acc)
            images_sum = tf.summary.image("input_images", total_images, batch_size)

            accuracy1, accuracy5, image_batch, step = sess.run([accuracy1_sum,accuracy5_sum,images_sum, global_step])
            writer = tf.summary.FileWriter(eval_inceptionV1_bin_dir)
            writer.add_summary(accuracy1, step)
            writer.add_summary(accuracy5, step)
            writer.add_summary(image_batch)
Example #9
File: predict.py  Project: jgeller1/IVF
def predict(train_dir, test_dir, output_file, num_classes):
	""" 
	Loads weights from a TF model and makes predictions.

	Arguments:

	train_dir: directory of trained model

	test_dir: directory of test images (split into folders by class)

	output_file: output file to store predictions

	num_classes: number of classes of prediction

	Returns:

	Outpiuts a file output_file with predictions  
	"""

	test_path = test_dir
	output = output_file
	nb_classes = num_classes

	print('Start to read images!')
	image_list = get_test_images(test_path)
	processed_images = tf.placeholder(tf.float32, shape=(None, image_size, image_size, 3))

	with slim.arg_scope(inception.inception_v1_arg_scope()):
		logits, _ = inception.inception_v1(processed_images, num_classes=nb_classes, is_training=False)

	probabilities = tf.nn.softmax(logits)

	def predict_fn(images):
		# `session` is assumed to be a module-level tf.Session created elsewhere.
		return session.run(probabilities, feed_dict={processed_images: images})

	#Loads in latest training checkpoint
	checkpoint_path = tf.train.latest_checkpoint(train_dir)
	init_fn = slim.assign_from_checkpoint_fn(checkpoint_path,slim.get_variables_to_restore())
	init_fn(session)
	print('Start to transform images!')
	images = transform_img_fn(image_list)

	fto = open(output, 'w')
	print('Start doing predictions!')
	preds = predict_fn(images)
	print(len(preds))
	for p in range(len(preds)):
		print(image_list[p], preds[p, :], np.argmax(preds[p, :]))
		fto.write(image_list[p])
		for j in range(len(preds[p,:])):
			fto.write('\t' + str(preds[p, j]))
		fto.write('\n')

	fto.close()
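Note: predict() above leans on module-level context defined elsewhere in the project: a `session`, an `image_size`, and the get_test_images / transform_img_fn helpers. A rough sketch of what that surrounding module might look like; every definition below is an assumption for illustration:

import os
import numpy as np
import tensorflow as tf
from PIL import Image

image_size = 224        # InceptionV1's default input size
session = tf.Session()  # module-level session used inside predict()

def get_test_images(test_path):
    # Collect image paths from one sub-folder per class.
    paths = []
    for cls in sorted(os.listdir(test_path)):
        cls_dir = os.path.join(test_path, cls)
        paths.extend(os.path.join(cls_dir, f) for f in sorted(os.listdir(cls_dir)))
    return paths

def transform_img_fn(image_list):
    # Resize and scale images into the [-1, 1] range Inception expects.
    out = []
    for fp in image_list:
        img = Image.open(fp).convert('RGB').resize((image_size, image_size))
        out.append(np.asarray(img, np.float32) / 127.5 - 1.0)
    return np.stack(out)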
Example #10
def train_inceptionV1(log_steps, save_summaries_sec, save_interval_secs, num_iterations=num_iterations_inception):
    with tf.Graph().as_default():
        tf.logging.set_verbosity(tf.logging.INFO)

        summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES))

        global_step = slim.get_or_create_global_step()

        dataset = plantclef2015.get_split('train', plant_data_dir)
        images, labels = load_batch(dataset, batch_size=batch_size, k=num_patches_inception, r=r_rotations_inception)

        # Add Images to summaries
        summaries.add(tf.summary.image("input_images", images, batch_size))

        # Create the models
        with slim.arg_scope(inception.inception_v1_arg_scope(weight_decay=weight_decay)):
            logits, _ = inception.inception_v1(images, num_classes=1000, is_training=True)


        # Specify the loss function:
        one_hot_labels = slim.one_hot_encoding(labels, dataset.num_classes)

        slim.losses.softmax_cross_entropy(logits, one_hot_labels)
        total_loss = slim.losses.get_total_loss()

        # Create some summaries to visualize the training process:
        for variable in slim.get_model_variables():
            summaries.add(tf.summary.histogram(variable.op.name, variable))
        summaries.add(tf.summary.scalar('losses/Total_Loss', total_loss))

        # Specify the optimizer and create the train op:
        learning_rate = tf.train.exponential_decay(start_learning_rate, global_step, updating_iteration_for_learning_rate, updating_gamma, staircase=True)

        optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=momentum)

        train_op = slim.learning.create_train_op(total_loss, optimizer)

        summaries.add(tf.summary.scalar('training/Learning_Rate', learning_rate))

        summary_op = tf.summary.merge(list(summaries), name='summary_op')

        # Run the training:
        final_loss = slim.learning.train(
            train_op,
            logdir=train_inceptionV1_dir,
            log_every_n_steps=log_steps,
            global_step=global_step,
            number_of_steps=num_iterations,
            summary_op=summary_op,
            init_fn=get_init_fn_V1(),
            save_summaries_secs=save_summaries_sec,
            save_interval_secs=save_interval_secs)

    print('Finished training. Last batch loss %f' % final_loss)
Example #11
    def adapt_pretrain_imagenet_to_flower():
        import os
        from datasets import flowers
        from nets import inception
        from preprocessing import inception_preprocessing

        slim = tf.contrib.slim
        image_size = inception.inception_v1.default_image_size

        def get_init_fn():
            checkpoint_exclude_scopes = [
                'InceptionV1/Logits', 'InceptionV1/AuxLogits'
            ]
            exclusions = [scope.strip() for scope in checkpoint_exclude_scopes]
            variables_to_restore = []
            for var in slim.get_model_variables():
                excluded = False
                for exclusion in exclusions:
                    if var.op.name.startswith(exclusion):
                        excluded = True
                        break
                if not excluded:
                    variables_to_restore.append(var)

            return slim.assign_from_checkpoint_fn(
                os.path.join(checkpoint_dir, 'inception_v1.ckpt'),
                variables_to_restore)

        with tf.Graph().as_default():
            tf.logging.set_verbosity(tf.logging.INFO)
            dataset = flowers.get_split('train', flower_data_dir)
            images, _, labels = load_batch(dataset,
                                           height=image_size,
                                           width=image_size)

            with slim.arg_scope(inception.inception_v1_arg_scope()):
                logits, _ = inception.inception_v1(
                    images, num_classes=dataset.num_classes, is_training=True)

                one_hot_labels = slim.one_hot_encoding(labels,
                                                       dataset.num_classes)
                slim.losses.softmax_cross_entropy(logits, one_hot_labels)
                total_loss = slim.losses.get_total_loss()

                tf.summary.scalar('losses/Total_Loss', total_loss)

                optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
                train_op = slim.learning.create_train_op(total_loss, optimizer)

                final_loss = slim.learning.train(train_op,
                                                 logdir=train_dir,
                                                 init_fn=get_init_fn(),
                                                 number_of_steps=2)

        print("Finished training. Las batch loss %f" % final_loss)
Example #12
def extract_feat(frames_name):
    """
    Reference:
        1. Vasili's codes
        2. https://github.com/tensorflow/models/issues/429#issuecomment-277885861
    """

    slim_dir = "/home/xyang/workspace/models/research/slim"
    checkpoints_dir = slim_dir + "/pretrain"
    checkpoints_file = checkpoints_dir + '/inception_v1.ckpt'
    batch_size = 256

    sys.path.append(slim_dir)
    from nets import inception
    import tensorflow as tf
    slim = tf.contrib.slim
    image_size = inception.inception_v1.default_image_size

    feat = []
    with tf.Graph().as_default():
        input_batch = tf.placeholder(dtype=tf.uint8,
                                     shape=(batch_size, 300, 300, 3))
        resized_images = tf.image.resize_images(
            tf.image.convert_image_dtype(input_batch, dtype=tf.float32),
            [image_size, image_size]
            )
        preprocessed_images = tf.multiply(tf.subtract(resized_images, 0.5), 2.0)
        
        # Create the model, use the default arg scope to configure
        # the batch norm parameters.
        with slim.arg_scope(inception.inception_v1_arg_scope()):
            logits, endpoints = inception.inception_v1(preprocessed_images,
                                                        num_classes=1001,
                                                        is_training=False)
        pool5 = endpoints['AvgPool_0a_7x7']

        with tf.Session() as sess:
            saver = tf.train.Saver()
            saver.restore(sess, checkpoints_file)

            for i in range(0, len(frames_name), batch_size):
                print(i, '/', len(frames_name))
                current_batch = np.zeros((batch_size, 300, 300, 3), dtype=np.uint8)
                for j in range(batch_size):
                    if i+j == len(frames_name):
                        # Partial last batch: step j back so feat_batch[:j+1]
                        # below keeps only the rows that were filled.
                        j -= 1
                        break
                    img = Image.open(frames_name[i+j]).convert('RGB').resize((300,300))
                    current_batch[j] = np.array(img)

                feat_batch = sess.run(pool5, feed_dict={input_batch: current_batch})
                feat_batch = np.squeeze(feat_batch)
                feat.append(feat_batch[:j+1].astype('float32'))

    return np.concatenate(feat, axis=0)
Example #13
def get_network_logits_and_endpoints(network, images):
  if network == 'inceptionV1':
    with slim.arg_scope(inception.inception_v1_arg_scope(weight_decay=weight_decay)):
      logits, endpoints = inception.inception_v1(images, num_classes=1000, is_training=False)

  elif network == 'vgg16':

    with slim.arg_scope(vgg.vgg_arg_scope(weight_decay=weight_decay)):
      logits, endpoints = vgg.vgg_16(images, num_classes=1000, is_training=False)

  return logits, endpoints
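Note: a possible call site for the helper above, assuming `weight_decay` is a module-level constant in the original file:

images = tf.placeholder(tf.float32, [None, 224, 224, 3])
logits, endpoints = get_network_logits_and_endpoints('inceptionV1', images)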
Example #14
  def testBatchNormScale(self):
    height, width = 224, 224
    num_classes = 1000
    inputs = tf.placeholder(tf.float32, (1, height, width, 3))
    with slim.arg_scope(
        inception.inception_v1_arg_scope(batch_norm_scale=True)):
      inception.inception_v1(inputs, num_classes, is_training=False)

    gamma_names = set(
        v.op.name for v in tf.global_variables('.*/BatchNorm/gamma:0$'))
    self.assertGreater(len(gamma_names), 0)
    for v in tf.global_variables('.*/BatchNorm/moving_mean:0$'):
      self.assertIn(v.op.name[:-len('moving_mean')] + 'gamma', gamma_names)
Example #16
    def apply_pretrained_model():
        import numpy as np
        import os
        import tensorflow as tf
        import urllib2
        from datasets import imagenet
        from nets import inception
        from preprocessing import inception_preprocessing

        slim = tf.contrib.slim

        batch_size = 3
        image_size = inception.inception_v1.default_image_size

        with tf.Graph().as_default():
            url = 'https://upload.wikimedia.org/wikipedia/commons/7/70/EnglishCockerSpaniel_simon.jpg'
            image_string = urllib2.urlopen(url).read()
            image = tf.image.decode_jpeg(image_string, channels=3)
            processed_image = inception_preprocessing.preprocess_image(
                image, image_size, image_size, is_training=False)
            processed_images = tf.expand_dims(processed_image, 0)

            with slim.arg_scope(inception.inception_v1_arg_scope()):
                logits, _ = inception.inception_v1(processed_images,
                                                   num_classes=1001,
                                                   is_training=False)
            probabilities = tf.nn.softmax(logits)

            init_fn = slim.assign_from_checkpoint_fn(
                os.path.join(checkpoint_dir, 'inception_v1.ckpt'),
                slim.get_model_variables('InceptionV1'))

            with tf.Session() as sess:
                init_fn(sess)
                np_image, probabilities = sess.run([image, probabilities])
                probabilities = probabilities[0, 0:]
                sorted_inds = [
                    i[0] for i in sorted(enumerate(-probabilities),
                                         key=lambda x: x[1])
                ]

            plt.figure()
            plt.imshow(np_image.astype(np.uint8))
            plt.axis('off')
            plt.show()

            names = imagenet.create_readable_names_for_imagenet_labels()
            for i in range(5):
                index = sorted_inds[i]
                print('Probability %0.2f%% => [%s]' %
                      (probabilities[index] * 100, names[index]))
Example #17
def eval(adv_imgs, labels, x_inputs, total_score, total_count):
    image = (((adv_imgs + 1.0) * 0.5) * 255.0)
    processed_imgs_inv1 = preprocess_for_model(image, 'inception_v1')
    with slim.arg_scope(inception.inception_v1_arg_scope()):
        logits_inc_v1, end_points_inc_v1 = inception.inception_v1(
            processed_imgs_inv1, num_classes=FLAGS.num_classes, is_training=False, scope='InceptionV1', reuse=True)
    pred_inception = tf.argmax(end_points_inc_v1['Predictions'], 1)

    # rescale pixel range from [-1, 1] to [0, 255] for resnet_v1 and vgg's input
    processed_imgs_res_v1_50 = preprocess_for_model(image, 'resnet_v1_50')
    with slim.arg_scope(resnet_v1.resnet_arg_scope()):
        logits_res_v1_50, end_points_res_v1_50 = resnet_v1.resnet_v1_50(
            processed_imgs_res_v1_50, num_classes=FLAGS.num_classes, is_training=False, scope='resnet_v1_50', reuse=True)

    end_points_res_v1_50['logits'] = tf.squeeze(end_points_res_v1_50['resnet_v1_50/logits'], [1, 2])
    end_points_res_v1_50['probs'] = tf.nn.softmax(end_points_res_v1_50['logits'])
    pred_resnet = tf.argmax(end_points_res_v1_50['probs'], 1)

    processed_imgs_inv3 = preprocess_for_model(image, 'inception_v3')
    with slim.arg_scope(inception_v3.inception_v3_arg_scope()):
        logits_res_inception_v3, end_points_inception_v3 = inception_v3.inception_v3(
            processed_imgs_inv3, num_classes=FLAGS.num_classes, is_training=False, scope='InceptionV3', reuse=True)
    pred_inception_v3 = tf.argmax(end_points_inception_v3['Predictions'], 1)

    processed_imgs_inv_res = preprocess_for_model(image, 'inception_resnet_v2')
    with slim.arg_scope(inception_resnet_v2.inception_resnet_v2_arg_scope()):
        logits_res_inception_resnet, end_points_inception_resnet = inception_resnet_v2.inception_resnet_v2(
            processed_imgs_inv_res, num_classes=FLAGS.num_classes, is_training=False, scope='InceptionResnetV2')
    pred_ince_res = tf.argmax(end_points_inception_resnet['Predictions'], 1)


    for i in range(adv_imgs.shape[0]):
        def f1(total_score, total_count):
            total_score = tf.add(total_score, 64)
            return total_score, total_count

        def f2(total_score, total_count):
            adv = (((adv_imgs[i] + 1.0) * 0.5) * 255.0)
            ori = (((x_inputs[i] + 1.0) * 0.5) * 255.0)
            diff = tf.reshape(adv, [-1, 3]) - tf.reshape(ori, [-1, 3])
            distance = tf.reduce_mean(tf.sqrt(tf.reduce_sum(tf.square(diff), axis=1)))
            total_score = tf.add(total_score, distance)
            total_count = tf.add(total_count, 1)
            return total_score, total_count
        total_score, total_count = tf.cond(tf.equal(pred_inception[i], labels[i]), lambda: f2(total_score, total_count), lambda: f1(total_score, total_count))
        total_score, total_count = tf.cond(tf.equal(pred_resnet[i], labels[i]), lambda: f2(total_score, total_count), lambda: f1(total_score, total_count))
        # total_score, total_count = tf.cond(tf.equal(pred_inception_v3[i], labels[i]), lambda: f2(total_score, total_count), lambda: f1(total_score, total_count))
        total_score, total_count = tf.cond(tf.equal(pred_ince_res[i], labels[i]), lambda: f2(total_score, total_count), lambda: f1(total_score, total_count))

    return total_score, total_count
Example #18
 def __call__(self, x_input):
     """Constructs model and return probabilities for given input."""
     reuse = True if self.built else None
     x_input = image_normalize(x_input, normalization_method[0])
     x_input = tf.image.resize_images(x_input, [224, 224])
     with slim.arg_scope(inception.inception_v1_arg_scope()):
         _, end_points = inception.inception_v1(
             x_input,
             num_classes=self.num_classes,
             is_training=False,
             reuse=reuse)
     self.built = True
     output = end_points['Predictions']
     # Strip off the extra reshape op at the output
     probs = output.op.inputs[0]
     return probs
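Note: because `self.built` gates the `reuse` flag, the wrapper above can be applied to several inputs while sharing one set of weights. A usage sketch, where `Model` is a stand-in name for the owning class (not shown in the snippet):

model = Model()               # hypothetical constructor
probs_clean = model(x_clean)  # first call builds the variables
probs_adv = model(x_adv)      # later calls reuse them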
Example #19
    def model_fn(features, labels, mode):
        from nets import inception
        slim = tf.contrib.slim
        labels = tf.squeeze(labels, axis=1)
        with slim.arg_scope(inception.inception_v1_arg_scope()):
            logits, end_points = inception.inception_v1(features,
                                                        num_classes=2,
                                                        is_training=True)

        if mode == tf.estimator.ModeKeys.TRAIN:
            loss = tf.reduce_mean(
                tf.losses.sparse_softmax_cross_entropy(logits=logits,
                                                       labels=labels))
            return TFEstimatorSpec(mode, predictions=logits, loss=loss)
        else:
            raise NotImplementedError
Example #20
def inception_v1(inputs, is_training, opts):
    with slim.arg_scope(inception.inception_v1_arg_scope(
            weight_decay=opts.weight_decay,
            use_batch_norm=opts.use_batch_norm,
            batch_norm_decay=opts.batch_norm_decay,
            batch_norm_epsilon=opts.batch_norm_epsilon,
            activation_fn=tf.nn.relu)):
        return inception.inception_v1(
            inputs,
            num_classes=opts.num_classes,
            is_training=is_training,
            dropout_keep_prob=opts.dropout_keep_prob,
            prediction_fn=slim.softmax,
            spatial_squeeze=opts.spatial_squeeze,
            reuse=None,
            global_pool=opts.global_pool)
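Note: the `opts` object only needs the attributes read above, so a bare argparse.Namespace is enough for quick experiments. The values below are illustrative defaults, not taken from the original project:

from argparse import Namespace
import tensorflow as tf

opts = Namespace(
    weight_decay=4e-5, use_batch_norm=True,
    batch_norm_decay=0.9997, batch_norm_epsilon=0.001,
    num_classes=1001, dropout_keep_prob=0.8,
    spatial_squeeze=True, global_pool=False)

inputs = tf.placeholder(tf.float32, [None, 224, 224, 3])
logits, end_points = inception_v1(inputs, is_training=False, opts=opts)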
Example #21
    def eval_adapt_pretrained_to_flower():
        import numpy as np
        import tensorflow as tf
        from datasets import flowers
        from nets import inception

        slim = tf.contrib.slim

        image_size = inception.inception_v1.default_image_size
        batch_size = 8

        with tf.Graph().as_default():
            tf.logging.set_verbosity(tf.logging.INFO)
            dataset = flowers.get_split('train', flower_data_dir)
            images, images_raw, labels = load_batch(dataset,
                                                    height=image_size,
                                                    width=image_size)

            with slim.arg_scope(inception.inception_v1_arg_scope()):
                logits, _ = inception.inception_v1(
                    images, num_classes=dataset.num_classes, is_training=True)
                probabilities = tf.nn.softmax(logits)

                checkpoint_path = tf.train.latest_checkpoint(train_dir)
                init_fn = slim.assign_from_checkpoint_fn(
                    checkpoint_path, slim.get_variables_to_restore())
                with tf.Session() as sess:
                    with slim.queues.QueueRunners(sess):
                        sess.run(tf.initialize_local_variables())
                        init_fn(sess)
                        np_probabilities, np_images_raw, np_labels = sess.run(
                            [probabilities, images_raw, labels])

                        for i in xrange(batch_size):
                            image = np_images_raw[i, :, :, :]
                            true_label = np_labels[i]
                            predicted_label = np.argmax(np_probabilities[i, :])
                            predicted_name = dataset.labels_to_names[
                                predicted_label]
                            true_name = dataset.labels_to_names[true_label]

                            plt.figure()
                            plt.imshow(image.astype(np.uint8))
                            plt.title('Ground Truth: [%s], Prediction [%s]' %
                                      (true_name, predicted_name))
                            plt.axis('off')
                            plt.show()
Example #22
def inception_tst():
    with tf.Graph().as_default():
        url = 'https://upload.wikimedia.org/wikipedia/commons/7/70/EnglishCockerSpaniel_simon.jpg'
        image_string = urllib.urlopen(url).read()
        image = tf.image.decode_jpeg(image_string, channels=3)
        processed_image = inception_preprocessing.preprocess_image(
            image, image_size, image_size, is_training=False)
        processed_images = tf.expand_dims(processed_image, 0)

        # Create the model, use the default arg scope to configure the batch norm parameters.
        with slim.arg_scope(inception.inception_v1_arg_scope()):
            logits, _ = inception.inception_v1(processed_images,
                                               num_classes=1001,
                                               is_training=False)
        probabilities = tf.nn.softmax(logits)

        variables_to_restore = slim.get_variables()
        print(len(variables_to_restore))
        # pprint(variables_to_restore)
        variables_to_restore = slim.get_model_variables()
        print(len(variables_to_restore))
        return  # NOTE: early return for inspection; the restore/plot code below never runs

        init_fn = slim.assign_from_checkpoint_fn(
            os.path.join(checkpoints_dir, 'inception_v1.ckpt'),
            slim.get_model_variables('InceptionV1'))

        with tf.Session() as sess:
            init_fn(sess)
            np_image, probabilities = sess.run([image, probabilities])
            probabilities = probabilities[0, 0:]
            sorted_inds = [
                i[0]
                for i in sorted(enumerate(-probabilities), key=lambda x: x[1])
            ]

        plt.figure()
        plt.imshow(np_image.astype(np.uint8))
        plt.axis('off')
        plt.show()

        names = imagenet.create_readable_names_for_imagenet_labels()
        for i in range(5):
            index = sorted_inds[i]
            print('Probability %0.2f%% => [%s]' %
                  (probabilities[index] * 100, names[index]))
Example #23
def predict(train_dir, test_dir, num_classes):
    """ 
	Loads weights from a TF model and makes predictions.

	Arguments:

	train_dir: directory of trained model

	test_dir: directory of test images (split into folders by class)

	num_classes: number of classes of prediction

	Returns:

	Returns logits in the order of test images given and nparray of predictions
	"""

    processed_images = tf.placeholder(tf.float32,
                                      shape=(None, image_size, image_size, 3))

    with slim.arg_scope(inception.inception_v1_arg_scope()):
        logits, _ = inception.inception_v1(processed_images,
                                           num_classes=num_classes,
                                           is_training=False)

    probabilities = tf.nn.softmax(logits)

    def predict_fn(images):
        # `session` is assumed to be a module-level tf.Session created elsewhere.
        return session.run(probabilities, feed_dict={processed_images: images})

    #Loads in latest training checkpoint
    checkpoint_path = tf.train.latest_checkpoint(train_dir)
    init_fn = slim.assign_from_checkpoint_fn(checkpoint_path,
                                             slim.get_variables_to_restore())
    init_fn(session)
    image_list = get_test_images(test_dir)
    images = transform_img_fn(image_list)
    predicted_probs = predict_fn(images)

    predictions = np.empty((len(predicted_probs), ))

    for example in range(len(predicted_probs)):
        predictions[example] = np.argmax(predicted_probs[example, :])

    return predicted_probs, predictions
Example #24
def label_test():
    image_size = inception.inception_v1.default_image_size
    batch_size = 12

    with tf.Graph().as_default():
        tf.logging.set_verbosity(tf.logging.INFO)

        dataset = flowers.get_split('validation', flowers_data_dir)
        images, images_raw, labels = load_batch(dataset,
                                                height=image_size,
                                                width=image_size)

        # Create the model, use the default arg scope to configure the batch norm parameters.
        with slim.arg_scope(inception.inception_v1_arg_scope()):
            logits, _ = inception.inception_v1(images,
                                               num_classes=dataset.num_classes,
                                               is_training=True)

        probabilities = tf.nn.softmax(logits)

        checkpoint_path = tf.train.latest_checkpoint(train_dir)
        init_fn = slim.assign_from_checkpoint_fn(
            checkpoint_path, slim.get_variables_to_restore())

        with tf.Session() as sess:
            with slim.queues.QueueRunners(sess):
                sess.run(tf.initialize_local_variables())
                init_fn(sess)
                np_probabilities, np_images_raw, np_labels = sess.run(
                    [probabilities, images_raw, labels])

                for i in range(batch_size):
                    image = np_images_raw[i, :, :, :]
                    true_label = np_labels[i]
                    predicted_label = np.argmax(np_probabilities[i, :])
                    predicted_name = dataset.labels_to_names[predicted_label]
                    true_name = dataset.labels_to_names[true_label]

                    plt.figure()
                    plt.imshow(image / (image.max() - image.min()))
                    #                     plt.imshow(image.astype(np.uint8))
                    plt.title('Ground Truth: [%s], Prediction [%s]' %
                              (true_name, predicted_name))
                    plt.axis('off')
                    plt.show()
Example #25
def load_inception_model(network_config, net_input=None):

    with slim.arg_scope(inception.inception_v1_arg_scope()):

        if net_input is None:
            net_input = tf.placeholder("float",
                                       [None, image_size, image_size, 3],
                                       name='nn_input')

        logits, end_points = inception.inception_v1(net_input,
                                                    num_classes=1001,
                                                    is_training=True)

        init_fn = slim.assign_from_checkpoint_fn(
            os.path.join('./checkpoints', 'inception_v1.ckpt'),
            slim.get_model_variables('InceptionV1'))

        return logits, end_points, init_fn, net_input
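Note: a usage sketch for the loader above: restore the checkpoint into a fresh session and query the Predictions endpoint. The zero batch and `image_size` are placeholders for illustration:

import numpy as np

logits, end_points, init_fn, net_input = load_inception_model(network_config=None)
with tf.Session() as sess:
    init_fn(sess)
    batch = np.zeros((1, image_size, image_size, 3), np.float32)
    preds = sess.run(end_points['Predictions'], feed_dict={net_input: batch})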
Example #26
def eval():
    # This might take a few minutes.
    image_size = inception.inception_v1.default_image_size

    with tf.Graph().as_default():
        #tf.logging.set_verbosity(tf.logging.INFO)

        dataset = flowers.get_split('train', flowers_data_dir)
        images, images_raw, labels = load_batch(dataset,
                                                height=image_size,
                                                width=image_size)

        # Create the model, use the default arg scope to configure the batch norm parameters.
        with slim.arg_scope(inception.inception_v1_arg_scope()):
            logits, _ = inception.inception_v1(images,
                                               num_classes=dataset.num_classes,
                                               is_training=True)

        predictions = tf.argmax(logits, 1)

        checkpoint_path = tf.train.latest_checkpoint(train_dir)
        init_fn = slim.assign_from_checkpoint_fn(
            checkpoint_path, slim.get_variables_to_restore())

        # Define the metrics:
        names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({
            'eval/Accuracy':
            slim.metrics.streaming_accuracy(predictions, labels),
            'eval/Recall@5':
            slim.metrics.streaming_sparse_recall_at_k(logits, labels, 5)
        })

        print('Running evaluation Loop...')
        metric_values = slim.evaluation.evaluate_once(
            master='',
            checkpoint_path=checkpoint_path,
            logdir=train_dir,
            eval_op=list(names_to_updates.values()),
            final_op=list(names_to_values.values()))

        names_to_values = dict(zip(names_to_values.keys(), metric_values))
        for name in names_to_values:
            print('%s: %f' % (name, names_to_values[name]))
Example #28
def fine_tune():
    with tf.Graph().as_default():
        tf.logging.set_verbosity(tf.logging.INFO)

        dataset = flowers.get_split('train', flowers_data_dir)
        images, _, labels = load_batch(dataset,
                                       height=image_size,
                                       width=image_size)

        # Create the model, use the default arg scope to configure the batch norm parameters.
        with slim.arg_scope(inception.inception_v1_arg_scope()):
            logits, _ = inception.inception_v1(images,
                                               num_classes=dataset.num_classes,
                                               is_training=True)

        # Specify the loss function:
        one_hot_labels = slim.one_hot_encoding(labels, dataset.num_classes)
        slim.losses.softmax_cross_entropy(logits, one_hot_labels)
        total_loss = slim.losses.get_total_loss()

        # Create some summaries to visualize the training process:
        tf.summary.scalar('losses/Total_Loss', total_loss)

        # Specify the optimizer and create the train op:
        optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
        train_op = slim.learning.create_train_op(total_loss, optimizer)

        # Run the training:
        final_loss = slim.learning.train(
            train_op,
            logdir=train_dir,
            init_fn=get_init_fn(),
            number_of_steps=1000,  # For speed, we just do 1 epoch
            save_interval_secs=600,
            save_summaries_secs=6000,
            log_every_n_steps=1,
        )

    print('Finished training. Last batch loss %f' % final_loss)
Example #29
    def recognize(self, roi):
        try:
            frame = self.cv_bridge.imgmsg_to_cv2(roi.RequestRoi, "bgr8")
        except CvBridgeError as e:
            print(e)

        image = tf.convert_to_tensor(frame)
        processed_image = inception_preprocessing.preprocess_image(
            image,
            inception.inception_v1.default_image_size,
            inception.inception_v1.default_image_size,
            is_training=False)
        processed_images = tf.expand_dims(processed_image, 0)

        with slim.arg_scope(inception.inception_v1_arg_scope()):
            logits, _ = inception.inception_v1(processed_images,
                                               num_classes=1001,
                                               is_training=False)
        probabilities = tf.nn.softmax(logits)

        init_fn = slim.assign_from_checkpoint_fn(
            self.model_file, slim.get_model_variables('InceptionV1'))

        with tf.Session() as sess:
            init_fn(sess)
            np_image, probabilities = sess.run([image, probabilities])
            probabilities = probabilities[0, 0:]
            sorted_inds = [
                i[0]
                for i in sorted(enumerate(-probabilities), key=lambda x: x[1])
            ]

        for i in range(5):
            index = sorted_inds[i]
            rospy.loginfo('Probability %0.2f%% => [%s]' %
                          (probabilities[index] * 100, self.names[index]))

        return self.names[sorted_inds[0]].split(',')[0]
Example #30
    def init_train(self):
        with tf.Graph().as_default():
            dataset = self.data_loader.get_split('train', FLAGS.data_dir)
            data_tensors = load_batch(dataset,
                                      height=FLAGS.image_size,
                                      width=FLAGS.image_size)
            images = data_tensors['images']
            labels = data_tensors['labels']

            # Create the model, use the default arg scope to configure the batch norm parameters.
            with slim.arg_scope(inception.inception_v1_arg_scope()):
                logits, _ = inception.inception_v1(
                    images, num_classes=dataset.num_classes, is_training=True)

            labels = tf.subtract(labels, 1)

            # Specify the loss function:
            # slim.one_hot_encoding already yields float32 labels, as the loss expects.
            one_hot_labels = slim.one_hot_encoding(labels, dataset.num_classes)
            slim.losses.softmax_cross_entropy(logits, one_hot_labels)
            total_loss = slim.losses.get_total_loss()

            # Create some summaries to visualize the training process:
            tf.summary.scalar('losses/Total_Loss', total_loss)

            # Specify the optimizer and create the train op:
            optimizer = tf.train.AdamOptimizer(learning_rate=0.001)
            train_op = slim.learning.create_train_op(total_loss, optimizer)

            # Run the training:
            final_loss = slim.learning.train(
                train_op,
                logdir=FLAGS.log_dir,
                init_fn=get_init_fn(),
                number_of_steps=FLAGS.number_of_steps)

        print('Finished training. Last batch loss %f' % final_loss)
Example #31
    def __call__(self,
                 image_input,
                 training=False,
                 keep_prob=1.0,
                 endpoint_name='Mixed_5c'):
        weight_decay = FLAGS.weight_decay
        activation_fn = tf.nn.relu

        end_points = {}
        with slim.arg_scope(
                inception.inception_v1_arg_scope(
                    weight_decay=FLAGS.weight_decay)):
            with tf.variable_scope("", reuse=self.reuse):
                with tf.variable_scope(None, 'InceptionV1',
                                       [image_input]) as scope:
                    with slim.arg_scope([slim.batch_norm, slim.dropout],
                                        is_training=training):
                        net, end_points = inception.inception_v1_base(
                            image_input, scope=scope)
                        feature_map = end_points[endpoint_name]

                        self.reuse = True

        return feature_map
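Note: as in Example #1, `self.reuse` lets this extractor run on multiple batches with shared weights. A hypothetical usage, with `Extractor` standing in for the owning class:

extractor = Extractor()                       # hypothetical constructor
feat_a = extractor(images_a, training=False)  # first call builds the InceptionV1 variables
feat_b = extractor(images_b, training=False)  # later calls reuse them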
Example #32
def main(_):
    batch_shape = [FLAGS.batch_size, FLAGS.image_height, FLAGS.image_width, 3]
    num_classes = 1001
    ensemble_type = FLAGS.ensemble_type

    tf.logging.set_verbosity(tf.logging.INFO)

    checkpoint_path_list = [
        FLAGS.checkpoint_path_inception_v1, FLAGS.checkpoint_path_inception_v2,
        FLAGS.checkpoint_path_inception_v3, FLAGS.checkpoint_path_inception_v4,
        FLAGS.checkpoint_path_inception_resnet_v2,
        FLAGS.checkpoint_path_resnet_v1_101,
        FLAGS.checkpoint_path_resnet_v1_152,
        FLAGS.checkpoint_path_resnet_v2_101,
        FLAGS.checkpoint_path_resnet_v2_152, FLAGS.checkpoint_path_vgg_16,
        FLAGS.checkpoint_path_vgg_19
    ]
    normalization_method = [
        'default', 'default', 'default', 'default', 'global', 'caffe_rgb',
        'caffe_rgb', 'default', 'default', 'caffe_rgb', 'caffe_rgb'
    ]
    pred_list = []
    for idx, checkpoint_path in enumerate(checkpoint_path_list, 1):
        with tf.Graph().as_default():
            if int(FLAGS.test_idx) == 20 and idx == 3:
                continue
            if int(FLAGS.test_idx) in range(1, 12) and int(FLAGS.test_idx) != idx:
                continue
            # Prepare graph
            if idx in [1, 2, 6, 7, 10, 11]:
                _x_input = tf.placeholder(tf.float32, shape=batch_shape)
                x_input = tf.image.resize_images(_x_input, [224, 224])
            else:
                _x_input = tf.placeholder(tf.float32, shape=batch_shape)
                x_input = _x_input

            x_input = image_normalize(x_input, normalization_method[idx - 1])

            if idx == 1:
                with slim.arg_scope(inception.inception_v1_arg_scope()):
                    _, end_points = inception.inception_v1(
                        x_input, num_classes=num_classes, is_training=False)
            elif idx == 2:
                with slim.arg_scope(inception.inception_v2_arg_scope()):
                    _, end_points = inception.inception_v2(
                        x_input, num_classes=num_classes, is_training=False)
            elif idx == 3:
                with slim.arg_scope(inception.inception_v3_arg_scope()):
                    _, end_points = inception.inception_v3(
                        x_input, num_classes=num_classes, is_training=False)
            elif idx == 4:
                with slim.arg_scope(inception.inception_v4_arg_scope()):
                    _, end_points = inception.inception_v4(
                        x_input, num_classes=num_classes, is_training=False)
            elif idx == 5:
                with slim.arg_scope(inception.inception_resnet_v2_arg_scope()):
                    _, end_points = inception.inception_resnet_v2(
                        x_input, num_classes=num_classes, is_training=False)
            elif idx == 6:
                with slim.arg_scope(resnet_v1.resnet_arg_scope()):
                    _, end_points = resnet_v1.resnet_v1_101(x_input,
                                                            num_classes=1000,
                                                            is_training=False)
            elif idx == 7:
                with slim.arg_scope(resnet_v1.resnet_arg_scope()):
                    _, end_points = resnet_v1.resnet_v1_152(x_input,
                                                            num_classes=1000,
                                                            is_training=False)
            elif idx == 8:
                with slim.arg_scope(resnet_v2.resnet_arg_scope()):
                    _, end_points = resnet_v2.resnet_v2_101(
                        x_input, num_classes=num_classes, is_training=False)
            elif idx == 9:
                with slim.arg_scope(resnet_v2.resnet_arg_scope()):
                    _, end_points = resnet_v2.resnet_v2_152(
                        x_input, num_classes=num_classes, is_training=False)
            elif idx == 10:
                with slim.arg_scope(vgg.vgg_arg_scope()):
                    _, end_points = vgg.vgg_16(x_input,
                                               num_classes=1000,
                                               is_training=False)
                    end_points['predictions'] = tf.nn.softmax(
                        end_points['vgg_16/fc8'])
            elif idx == 11:
                with slim.arg_scope(vgg.vgg_arg_scope()):
                    _, end_points = vgg.vgg_19(x_input,
                                               num_classes=1000,
                                               is_training=False)
                    end_points['predictions'] = tf.nn.softmax(
                        end_points['vgg_19/fc8'])

            #end_points = tf.reduce_mean([end_points1['Predictions'], end_points2['Predictions'], end_points3['Predictions'], end_points4['Predictions']], axis=0)

            #predicted_labels = tf.argmax(end_points, 1)

            # Run computation
            saver = tf.train.Saver(slim.get_model_variables())
            session_creator = tf.train.ChiefSessionCreator(
                scaffold=tf.train.Scaffold(saver=saver),
                checkpoint_filename_with_path=checkpoint_path,
                master=FLAGS.master)

            pred_in = []
            filenames_list = []
            with tf.train.MonitoredSession(
                    session_creator=session_creator) as sess:
                for filenames, images in load_images(FLAGS.input_dir,
                                                     batch_shape):
                    #if idx in [1,2,6,7,10,11]:
                    #  # 16x299x299x3
                    #  images = zoom(images, (1, 0.7491638795986622, 0.7491638795986622, 1), order=2)
                    filenames_list.extend(filenames)
                    end_points_dict = sess.run(end_points,
                                               feed_dict={_x_input: images})
                    if idx in [6, 7, 10, 11]:
                        end_points_dict['predictions'] = \
                                      np.concatenate([np.zeros([FLAGS.batch_size, 1]),
                                                      np.array(end_points_dict['predictions'].reshape(-1, 1000))],
                                                      axis=1)
                    try:
                        pred_in.extend(end_points_dict['Predictions'].reshape(
                            -1, num_classes))
                    except KeyError:
                        pred_in.extend(end_points_dict['predictions'].reshape(
                            -1, num_classes))
            pred_list.append(pred_in)

    if ensemble_type == 'mean':
        # model_num x batch x class_num ==(mean over models)==> batch x class_num
        pred = np.mean(pred_list, axis=0)
        # ==(argmax over classes)==> batch
        labels = np.argmax(pred, axis=1)
    elif ensemble_type == 'vote':
        # model_num x batch x class_num ==(argmax over classes)==> model_num x batch
        pred = np.argmax(pred_list, axis=2)
        # ==(median over models)==> batch
        labels = np.median(pred, axis=0)
    with tf.gfile.Open(FLAGS.output_file, 'w') as out_file:
        for filename, label in zip(filenames_list, labels):
            out_file.write('{0},{1}\n'.format(filename, label))
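Note: the script calls an `image_normalize(x, method)` helper that is not shown. A plausible sketch, assuming the loaded images arrive as floats in [0, 1]; the exact ranges are guesses based on the method names:

def image_normalize(x, method):
    if method == 'caffe_rgb':
        # VGG / ResNet-v1 style: 0-255 RGB with the ImageNet channel means subtracted.
        mean = tf.constant([123.68, 116.78, 103.94], dtype=tf.float32)
        return x * 255.0 - mean
    # 'default' and 'global' are assumed here to mean the Inception-style [-1, 1] range.
    return x * 2.0 - 1.0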
Example #33
image_size = inception.inception_v1.default_image_size

with tf.Graph().as_default():

    start = time.time()

    image_input = tf.read_file(
        "\\tensorflow-dataset\\test images\\017_267.jpg")
    image = tf.image.decode_jpeg(image_input, channels=3)
    processed_image = inception_preprocessing.preprocess_image(
        image, image_size, image_size, is_training=False)

    processed_images = tf.expand_dims(processed_image, 0)

    with slim.arg_scope(inception.inception_v1_arg_scope()):
        logits, _ = inception.inception_v1(processed_images,
                                           num_classes=17,
                                           is_training=False)
    probabilities = tf.nn.softmax(logits)

    init_fn = slim.assign_from_checkpoint_fn(
        os.path.join(classification_checkpoint_dir, 'model.ckpt-500'),
        slim.get_model_variables('InceptionV1'))

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        init_fn(sess)
        np_image, probabilities = sess.run([image, probabilities])
        probabilities = probabilities[0, 0:]
Example #34
    def Api_trans(self):
        global num
        j = 1
        client_id = "vIj2xvkAVXo8ArNz48NH"  # Client ID issued by the Naver developer center
        client_secret = "xZAQ6OTeHy"  # Client Secret issued by the Naver developer center
        # Request URL
        url = "https://openapi.naver.com/v1/papago/n2mt"

        # Image evaluation code
        slim = tf.contrib.slim

        image_size = inception.inception_v1.default_image_size

        for i in range(1, num):
            with tf.Graph().as_default():
                image_input = tf.read_file('./images/' + str(i) + '.jpg')
                image = tf.image.decode_jpeg(image_input, channels=3)
                processed_image = inception_preprocessing.preprocess_image(
                    image, image_size, image_size, is_training=False)
                processed_images = tf.expand_dims(processed_image, 0)

                with slim.arg_scope(inception.inception_v1_arg_scope()):
                    logits, _ = inception.inception_v1(processed_images,
                                                       num_classes=10,
                                                       is_training=False)
                probabilities = tf.nn.softmax(logits)

                init_fn = slim.assign_from_checkpoint_fn(
                    # os.path.join(checkpoints_dir, 'model.ckpt-500'),
                    # os.path.join(checkpoints_dir, 'model.ckpt-1000'),
                    # os.path.join(checkpoints_dir, 'model.ckpt-5000'),
                    os.path.join(checkpoints_dir, 'model.ckpt-5000'),
                    slim.get_model_variables('InceptionV1'))

                with tf.Session() as sess:
                    init_fn(sess)
                    np_image, probabilities = sess.run([image, probabilities])
                    probabilities = probabilities[0, 0:]
                    sorted_inds = [
                        i[0] for i in sorted(enumerate(-probabilities),
                                             key=lambda x: x[1])
                    ]

                #plt.figure()
                #plt.imshow(np_image.astype(np.uint8))
                #plt.axis('off')
                #plt.show()

                names = os.listdir("C:\\SignLanguage2\\SignLanguage2_photos")
                # F:\\SL\SignLanguage3\\SignLanguage_photos
                #for i in range(5):
                index = sorted_inds[0]
                print(
                    'Result %d: [%s] => %0.2f%%' %
                    (j, names[index], probabilities[index] * 100))
                if (i % 3) == 0:
                    j = j + 1
                #self.edit_text.setText(self.edit_text.toPlainText() + "," + names[index])
                #edit_text.setText("aa")

        # Language and text to translate
        text = input("Enter the text to translate: ")
        encText = urllib.parse.quote(text)
        srcLang = 'en'
        tarLang = 'ko'
        rq_data = "source={}&target={}&text=".format(srcLang,
                                                     tarLang) + encText

        # Web request
        request = urllib.request.Request(url)
        request.add_header("X-Naver-Client-Id", client_id)
        request.add_header("X-Naver-Client-Secret", client_secret)

        # Receive the result
        response = urllib.request.urlopen(request,
                                          data=rq_data.encode("utf-8"))

        # When the response succeeds
        rescode = response.getcode()
        if rescode == 200:  # success
            response_body = response.read()
            rs_data = response_body.decode('utf-8')
            rs_data = json.loads(rs_data)  # parse JSON into a dict
            trans_text = rs_data['message']['result']['translatedText']
            print("Translated text: ", trans_text)
            return trans_text
        else:  # failure
            print("Error Code: " + str(rescode))
Example #35
BATCH_SIZE = 1
EPOCHS = 1

classes = util.pkl_load(workspace.class_pkl)
csv_files = workspace.test_csvs


graph = tf.Graph()
with graph.as_default():
    tf.logging.set_verbosity(tf.logging.INFO)
    fp_placeholder = tf.placeholder(tf.string)
    image = read_one_image(fp_placeholder)
    images = tf.reshape(image, [1, 224, 224, 3])

    # Create the model, use the default arg scope to configure the batch norm parameters.
    with slim.arg_scope(inception.inception_v1_arg_scope()):
        logits, _ = inception.inception_v1(images, num_classes=len(classes), is_training=False)
    probabilities = tf.nn.softmax(logits)

    checkpoint = tf.train.latest_checkpoint(os.path.join(workspace.inception_cpkt, 'log'))
    init_fn = slim.assign_from_checkpoint_fn(
        checkpoint,
        slim.get_model_variables())

with tf.Session(graph=graph) as sess:
    init_fn(sess)
    top1 = 0
    top5 = 0
    total = 0
    # There is probably a better way to do this with pure TF, but
    # this will do for calculating accuracy.
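Note: the listing is cut off here. A plausible continuation of the accuracy loop, assuming each test CSV pairs an image path with an integer label; `iter_csv_rows` is invented for the sketch:

    for fp, label in iter_csv_rows(csv_files):  # hypothetical helper
        probs = sess.run(probabilities, feed_dict={fp_placeholder: fp})[0]
        ranked = np.argsort(-probs)
        total += 1
        top1 += int(ranked[0] == label)
        top5 += int(label in ranked[:5])
    print('top1: %.3f  top5: %.3f' % (top1 / float(total), top5 / float(total)))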