Example #1
    def __init__(self, config):
        self.net = densenet121(num_classes=config['num_classes'],
                               drop_prob=config['drop_prob'])
        self.config = config
        self.epochs = config['epochs']
        self.use_cuda = config['use_cuda']
        if self.use_cuda:
            self.net = self.net.cuda()

        run_timestamp = datetime.now().strftime("%Y%b%d-%H%M%S")
        self.ckpt_path = os.path.join(config['ckpt_path'], run_timestamp)
        if config['logger']:
            if not os.path.exists(self.ckpt_path):
                os.makedirs(self.ckpt_path)

            self.logger = Logger(
                os.path.join(self.ckpt_path, 'densenet121.log')).get_logger()
            self.logger.info(">>>The net is:")
            self.logger.info(self.net)
            self.logger.info(">>>The config is:")
            self.logger.info(json.dumps(self.config, indent=2))
        if config['use_tensorboard']:
            self.run_path = os.path.join(config['run_path'], run_timestamp)
            if not os.path.exists(self.run_path):
                os.makedirs(self.run_path)
            self.writer = SummaryWriter(self.run_path)
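
A minimal usage sketch for this trainer (the class name DenseNet121Trainer is hypothetical, since the snippet omits the class statement; the config keys are taken from the code above, the values are assumptions):

config = {
    'num_classes': 14,   # assumed value
    'drop_prob': 0.2,    # assumed value
    'epochs': 50,        # assumed value
    'use_cuda': torch.cuda.is_available(),
    'ckpt_path': './checkpoints',
    'run_path': './runs',
    'logger': True,
    'use_tensorboard': True,
}
trainer = DenseNet121Trainer(config)  # hypothetical class name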
Example #2
def load_model_tf(parameters):
    # Setup model params
    if (parameters["device_type"] == "gpu") and tf.test.is_gpu_available():
        device_str = "/device:GPU:{}".format(parameters["gpu_number"])
    else:
        device_str = "/cpu:0"

    # Setup Graph
    graph = tf.Graph()
    with graph.as_default():
        with tf.device(device_str):
            x = tf.placeholder(tf.float32, [None, 256, 256, 3])
            with slim.arg_scope(
                    densenet_arg_scope(weight_decay=0.0, data_format='NHWC')):
                densenet121_net, end_points = densenet121(
                    x,
                    num_classes=parameters["number_of_classes"],
                    data_format='NHWC',
                    is_training=False,
                )
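            # collapse the 1x1 spatial dimensions of the densenet output to [batch, num_classes]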
            y_logits = densenet121_net[:, 0, 0, :]
            y = tf.nn.softmax(y_logits)

    # Load weights
    sess = tf.Session(graph=graph,
                      config=tf.ConfigProto(gpu_options=tf.GPUOptions(
                          per_process_gpu_memory_fraction=0.333)))
    with open(parameters["tf_torch_weights_map_path"]) as f:
        tf_torch_weights_map = json.loads(f.read())

    with sess.as_default():
        torch_weights = torch.load(parameters["initial_parameters"])
        match_dict = construct_densenet_match_dict(
            tf_variables=tf_utils.get_tf_variables(graph,
                                                   batch_norm_key="BatchNorm"),
            torch_weights=torch_weights,
            tf_torch_weights_map=tf_torch_weights_map)
        sess.run(tf_utils.construct_weight_assign_ops(match_dict))

    return sess, x, y
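
A hedged sketch of running inference with the returned handles (load_batch is a hypothetical helper producing a float32 array of shape [N, 256, 256, 3], matching the placeholder above):

sess, x, y = load_model_tf(parameters)
batch = load_batch()                       # hypothetical helper
probs = sess.run(y, feed_dict={x: batch})  # softmax probabilities, [N, num_classes]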
Example #3
    def extract_features(self, preprocessed_inputs):
        """Extract features from preprocessed inputs.

        Args:
          preprocessed_inputs: a [batch, height, width, channels] float tensor
            representing a batch of images.

        Returns:
          feature_maps: a list of tensors where the ith tensor has shape
            [batch, height_i, width_i, depth_i]
        """
        # Make sure that input is in correct format with rank 4.
        preprocessed_inputs.get_shape().assert_has_rank(4)
        shape_assert = tf.Assert(
            tf.logical_and(
                tf.greater_equal(tf.shape(preprocessed_inputs)[1], 33),
                tf.greater_equal(tf.shape(preprocessed_inputs)[2], 33)),
            ['image size must be at least 33 in both height and width.'])

        with tf.control_dependencies([shape_assert]):
            with slim.arg_scope(self._conv_hyperparams):
                with tf.variable_scope('densenet121',
                                       reuse=self._reuse_weights) as scope:
                    _, image_features = densenet.densenet121(
                        preprocessed_inputs,
                        num_classes=1000,
                        data_format='NHWC',
                        is_training=True,
                        reuse=None)
                    # Insert scale transfer module
                    image_features = scale_transfer_module_densent_121(
                        image_features)
        # return a list of feature maps
        return list(image_features.values())
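
A minimal sketch of calling this extractor, assuming it is a method of an SSD-style feature-extractor class (the class itself is not shown; extractor stands for an instance of it):

images = tf.placeholder(tf.float32, [None, 300, 300, 3])  # any size >= 33x33
feature_maps = extractor.extract_features(images)         # list of [batch, h_i, w_i, d_i] tensors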
Example #4
    def __init__(self, num_classes, train_layers=None, weights_path='DEFAULT'):
        """Create the graph of the densenet_121 model.
        """

        # Parse input arguments into class variables
        if weights_path == 'DEFAULT':
            self.WEIGHTS_PATH = "./pre_trained_models/densenet_121.ckpt"
        else:
            self.WEIGHTS_PATH = weights_path
        # Fall back to an empty list so the membership tests below don't fail on None
        self.train_layers = train_layers or []

        with tf.variable_scope("input"):
            self.image_size = densenet.densenet121.default_image_size
            self.x_input = tf.placeholder(
                tf.float32, [None, self.image_size, self.image_size, 3],
                name="x_input")
            self.y_input = tf.placeholder(tf.float32, [None, num_classes],
                                          name="y_input")
            self.learning_rate = tf.placeholder(tf.float32,
                                                name="learning_rate")

        # train
        with arg_scope(densenet.densenet_arg_scope()):
            self.logits, _ = densenet.densenet121(self.x_input,
                                                  num_classes=num_classes,
                                                  is_training=True,
                                                  reuse=tf.AUTO_REUSE)

        # validation
        with arg_scope(densenet.densenet_arg_scope()):
            self.logits_val, _ = densenet.densenet121(
                self.x_input,
                num_classes=num_classes,
                is_training=False,
                reuse=tf.AUTO_REUSE,
            )

        with tf.name_scope("loss"):
            self.loss = tf.reduce_mean(
                tf.nn.softmax_cross_entropy_with_logits_v2(
                    logits=self.logits, labels=self.y_input))
            self.loss_val = tf.reduce_mean(
                tf.nn.softmax_cross_entropy_with_logits_v2(
                    logits=self.logits_val, labels=self.y_input))

        with tf.name_scope("train"):

            self.global_step = tf.Variable(0,
                                           name="global_step",
                                           trainable=False)
            update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)

            var_list = [
                v for v in tf.trainable_variables()
                if v.name.split('/')[-2] in self.train_layers
                or v.name.split('/')[-3] in self.train_layers
            ]
            gradients = tf.gradients(self.loss, var_list)
            self.grads_and_vars = list(zip(gradients, var_list))
            optimizer = tf.train.GradientDescentOptimizer(self.learning_rate)

            with tf.control_dependencies(update_ops):
                self.train_op = optimizer.apply_gradients(
                    grads_and_vars=self.grads_and_vars,
                    global_step=self.global_step)

        with tf.name_scope("probability"):
            self.probability = tf.nn.softmax(self.logits_val,
                                             name="probability")

        with tf.name_scope("prediction"):
            self.prediction = tf.argmax(self.logits_val, 1, name="prediction")

        with tf.name_scope("accuracy"):
            correct_prediction = tf.equal(self.prediction,
                                          tf.argmax(self.y_input, 1))
            self.accuracy = tf.reduce_mean(tf.cast(correct_prediction,
                                                   "float"),
                                           name="accuracy")
Example #5
        break
      image_files.append(data[:-1])
#    image_files = random.sample(image_names, sample_num)
    for i in image_files:
#        GT_dict[i] = i.split('/')[3]
        GT_num[i] = i.split('\\')[3]
        image_input = tf.read_file(i)
        image = tf.image.decode_jpeg(image_input, channels=3)
        user_images.append(image)
        processed_image = densenet_preprocessing.preprocess_image(image, image_size, image_size, is_training=False)
        user_processed_images.append(processed_image)

    # Batch the preprocessed images into a single [N, H, W, 3] tensor
    processed_images = tf.stack(user_processed_images)

    with slim.arg_scope(densenet.densenet_arg_scope()):
        logits, _ = densenet.densenet121(processed_images, num_classes=5, is_training=False)
    probabilities = tf.nn.softmax(logits)

    init_fn = slim.assign_from_checkpoint_fn(
        os.path.join(checkpoints_dir, 'model.ckpt-21031'),
        slim.get_model_variables('densenet121'))
        
    with tf.Session() as sess:
        init_fn(sess)
        probabilities = sess.run(probabilities)

#    names = os.listdir("tmp/captcha/test_photos2")
    names = ['bus', 'car', 'cat', 'dog', 'ship']
    names.sort()
    #names=['normal', 'adenoma', 'adenocarcinoma']
    for files in range(sample_num):
        # body truncated in the original example
        pass

Example #6
    def __init__(self, options):
        num_classes = options.NUM_CLASSES

        with tf.variable_scope("input"):
            self.image_size = options.IMAGE_SIZE
            self.x_input = tf.placeholder(
                tf.float32, [None, self.image_size, self.image_size, 3],
                name="x_input")
            self.y_input = tf.placeholder(tf.float32, [None, num_classes],
                                          name="y_input")
            self.learning_rate = tf.placeholder(tf.float32,
                                                name="learning_rate")
            self.keep_prob = None

        if options.PHASE == 'train':

            # `train_layers` is not defined in this snippet; assume it comes from options
            train_layers = getattr(options, 'TRAIN_LAYERS', 'default')
            if train_layers == 'default':
                self.train_layers = self.DEFAULT_TRAIN_LAYERS
            else:
                self.train_layers = train_layers

            # train
            with arg_scope(densenet.densenet_arg_scope()):
                self.logits, _ = densenet.densenet121(self.x_input,
                                                      num_classes=num_classes,
                                                      is_training=True,
                                                      reuse=tf.AUTO_REUSE)
            self.logits = tf.squeeze(self.logits, [1, 2])

            # validation
            with arg_scope(densenet.densenet_arg_scope()):
                self.logits_val, _ = densenet.densenet121(
                    self.x_input,
                    num_classes=num_classes,
                    is_training=False,
                    reuse=tf.AUTO_REUSE)
            self.logits_val = tf.squeeze(self.logits_val, [1, 2])

            with tf.name_scope("loss"):
                self.loss = tf.reduce_mean(
                    tf.nn.softmax_cross_entropy_with_logits_v2(
                        logits=self.logits, labels=self.y_input))
                self.loss_val = tf.reduce_mean(
                    tf.nn.softmax_cross_entropy_with_logits_v2(
                        logits=self.logits_val, labels=self.y_input))

            with tf.name_scope("train"):
                self.global_step = tf.Variable(0,
                                               name="global_step",
                                               trainable=False)
                update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)

                var_list = [
                    v for v in tf.trainable_variables()
                    if v.name.split('/')[-2] in self.train_layers
                    or v.name.split('/')[-3] in self.train_layers
                ]
                gradients = tf.gradients(self.loss, var_list)
                self.grads_and_vars = list(zip(gradients, var_list))
                # optimizer = tf.train.GradientDescentOptimizer(self.learning_rate)
                opt_name = options.OPTIMIZER
                if opt_name == 'sgd':
                    optimizer = tf.train.GradientDescentOptimizer(
                        self.learning_rate)
                elif opt_name == 'adam':
                    optimizer = tf.train.AdamOptimizer(self.learning_rate)
                else:
                    raise ValueError('Optimizer not supported')

                with tf.control_dependencies(update_ops):
                    self.train_op = optimizer.apply_gradients(
                        grads_and_vars=self.grads_and_vars,
                        global_step=self.global_step)
        else:
            with arg_scope(densenet.densenet_arg_scope()):
                self.logits_val, _ = densenet.densenet121(
                    self.x_input,
                    num_classes=num_classes,
                    is_training=False,
                    reuse=tf.AUTO_REUSE)
            self.logits_val = tf.squeeze(self.logits_val, [1, 2])

        with tf.name_scope("probability"):
            self.probability = tf.nn.softmax(self.logits_val,
                                             name="probability")

        with tf.name_scope("prediction"):
            self.prediction = tf.argmax(self.logits_val, 1, name="prediction")

        with tf.name_scope("accuracy"):
            correct_prediction = tf.equal(self.prediction,
                                          tf.argmax(self.y_input, 1))
            self.accuracy = tf.reduce_mean(tf.cast(correct_prediction,
                                                   "float"),
                                           name="accuracy")

        if options.PHASE == 'train':
            print(self.logits.shape.as_list())
        print(self.logits_val.shape.as_list())
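
A sketch of constructing this options-driven model for inference (an argparse-style namespace is assumed; the attribute names come from the snippet, except TRAIN_LAYERS, which is the assumption noted in the code above; the class name DenseNetModel is hypothetical):

from types import SimpleNamespace

options = SimpleNamespace(
    NUM_CLASSES=5,           # assumed value
    IMAGE_SIZE=224,          # assumed value
    PHASE='test',
    OPTIMIZER='sgd',
    TRAIN_LAYERS='default',  # assumed attribute
)
model = DenseNetModel(options)  # hypothetical class name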