Example no. 1
 def __init__(self, opts, n_images_per_batch):
     self.dirdata = os.path.join(opts.root_of_datasets, opts.dataset_name)
     self.img_extension, self.classnames = tools.process_dataset_config(
         os.path.join(self.dirdata, 'dataset_info.xml'))
     self.img_extension = '.' + self.img_extension
     self.max_image_size = opts.max_image_size
     self.percent_of_data = opts.percent_of_data
     self.shuffle_data = opts.shuffle_data
     self.n_images_per_batch = n_images_per_batch
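
Every example on this page calls tools.process_dataset_config, whose implementation is not shown. Below is a minimal sketch of what it might look like, assuming dataset_info.xml stores the image extension in a <format> element and the class names in <classes>/<class> elements (the tag names are guesses; what is certain from the callers is that it returns the extension without a leading dot, plus the list of class names):

import xml.etree.ElementTree as ET

def process_dataset_config(dataset_info_path):
    # Parse dataset_info.xml and return (img_extension, classnames).
    root = ET.parse(dataset_info_path).getroot()
    img_extension = root.find('format').text  # e.g. 'jpg', without the dot
    classnames = [node.text for node in root.findall('classes/class')]
    return img_extension, classnames
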
Example no. 2
 def generate_graph(self):
     dirdata = os.path.join(self.opts.root_of_datasets, self.opts.dataset_name)
     img_extension, self.classnames = tools.process_dataset_config(os.path.join(dirdata, 'dataset_info.xml'))
     self.nclasses = len(self.classnames)
     self.multi_cell_arch = MultiCellArch.MultiCellArch(
         self.opts.multi_cell_opts, self.nclasses, self.opts.outdir,
         self.opts.th_conf, self.classnames)
     self.define_inputs_and_labels()
     (self.localizations, self.softmax, self.common_representations,
      pc, dc, cm) = self.multi_cell_arch.make(self.inputs)
     self.restore_fn = tf.contrib.framework.assign_from_checkpoint_fn(
         self.opts.weights_file, tf.global_variables())
     self.saver = tf.train.Saver(name='net_saver', max_to_keep=1000000)
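
assign_from_checkpoint_fn returns a callable that restores the listed variables when handed a session. A sketch of how restore_fn would typically be invoked later (the surrounding code is illustrative, and net stands for an instance whose graph generate_graph built):

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    net.restore_fn(sess)  # loads the weights from net.opts.weights_file
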
Example no. 3
def speed_test_10():
    dataset_dir = os.path.join(root_of_datasets, 'ImageNet')
    img_extension, classnames = tools.process_dataset_config(
        os.path.join(dataset_dir, 'dataset_info.xml'))
    nclasses = len(classnames)
    labels_file = os.path.join(dataset_dir, 'train_labels.txt')
    filenames, labels = read_paths_and_labels(labels_file, dataset_dir,
                                              percent_of_data, shuffle_data)
    batched_dataset = build_dataset(filenames, labels)
    iterator = tf.data.Iterator.from_structure(batched_dataset.output_types,
                                               batched_dataset.output_shapes)
    x, y = iterator.get_next(name='iterator-output')
    train_init_op = iterator.make_initializer(batched_dataset,
                                              name='train_init_op')

    resnet_v1 = tf.contrib.slim.nets.resnet_v1
    with slim.arg_scope(
            tf.contrib.slim.python.slim.nets.resnet_utils.resnet_arg_scope()
    ):  # This arg scope is mandatory; otherwise loading the checkpoint file will fail.
        logits, _ = resnet_v1.resnet_v1_50(x,
                                           num_classes=nclasses,
                                           is_training=True,
                                           scope='resnet_v1_50')
        logits = tf.squeeze(logits, axis=[1, 2])

    loss = losses.cross_entropy(y, logits)
    tf.summary.scalar("loss", loss)

    optimizer = tf.train.MomentumOptimizer(learning_rate, momentum)
    update_bn_stats_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_bn_stats_ops):
        train_op = optimizer.minimize(loss, name='train_op')

    init_op = tf.global_variables_initializer()

    outdir = os.path.join(tools.get_base_dir(), 'tensorboard')
    if os.path.exists(outdir):
        shutil.rmtree(outdir)
    os.makedirs(outdir)
    with tf.Session() as sess:
        merged, summary_writer, tensorboard_url = prepare_tensorboard(
            sess, outdir)
        sess.run(init_op)
        sess.run(train_init_op)
        for i in range(n_steps):
            ini = time.time()
            _, summaryOut = sess.run(fetches=[train_op, merged])
            summary_writer.add_summary(summaryOut, i)
            fin = time.time()
            print('Step ' + str(i) + ' done in ' + str(fin - ini) + ' s.')
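
prepare_tensorboard is another helper that is not defined on this page. A minimal sketch under the assumption that it merges all summaries and opens a FileWriter on outdir (the returned URL is just the default TensorBoard address):

def prepare_tensorboard(sess, outdir):
    # Merge every tf.summary.* op defined so far into one fetchable op.
    merged = tf.summary.merge_all()
    # Write events (and the graph) to outdir for TensorBoard to display.
    summary_writer = tf.summary.FileWriter(outdir, sess.graph)
    tensorboard_url = 'http://localhost:6006'  # default TensorBoard port
    return merged, summary_writer, tensorboard_url
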
Example no. 4
 def __init__(self, input_width, input_height, opts, multi_cell_arch,
              batch_size, split):
     super(InteractiveDataReaderFromDataset,
           self).__init__(input_width, input_height, opts.resize_method)
     self.multi_cell_arch = multi_cell_arch
     self.opts = opts
     self.batch_size = batch_size
     self.dirdata = os.path.join(self.opts.root_of_datasets,
                                 self.opts.dataset_name)
     dataset_info_path = os.path.join(self.dirdata, 'dataset_info.xml')
     self.img_extension, self.classnames = tools.process_dataset_config(
         dataset_info_path)
     self.filenames = self.get_filenames(split)
Example no. 5
 def __init__(self, input_width, input_height, args):
     self.input_width = input_width
     self.input_height = input_height
     self.preprocess_type = args.preprocess_opts.type
     self.mean = args.preprocess_opts.mean
     self.range_min = args.preprocess_opts.range_min
     self.range_max = args.preprocess_opts.range_max
     self.resize_method = args.resize_method
     self.img_extension, self.classnames = tools.process_dataset_config(
         args.dataset_info_path)
     if args.preprocess_opts.mean == 'vgg':
         self.mean = [123.68, 116.78, 103.94]
     else:
         raise Exception('Preprocess mean not recognized.')
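
The values [123.68, 116.78, 103.94] are the per-channel RGB means of the ImageNet training set, as used by VGG. A sketch of how self.mean would be applied during preprocessing (a method of the reader class above; the name preprocess is an assumption):

def preprocess(self, image):
    # image: float32 RGB tensor of shape [height, width, 3].
    # Subtracting the dataset mean centers each channel around zero.
    return image - tf.constant(self.mean, dtype=tf.float32)
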
Example no. 6
    def __init__(self, input_shape, args):

        self.batch_size = args.batch_size
        self.input_width = input_shape[0]
        self.input_height = input_shape[1]
        self.num_workers = args.num_workers
        self.buffer_size = args.buffer_size

        self.resize_function = Resizer.ResizerSimple(
            self.input_width,
            self.input_height).get_resize_func(args.resize_method)

        self.percent_of_data = args.percent_of_data
        self.max_image_size = args.max_image_size
        self.nimages_train = None
        self.nimages_val = None
        self.train_init_op = None
        self.val_init_op = None
        self.dirdata = os.path.join(args.root_of_datasets, args.dataset_name)
        self.img_extension, self.classnames = tools.process_dataset_config(
            os.path.join(self.dirdata, 'dataset_info.xml'))
        self.img_extension = '.' + self.img_extension
        self.nclasses = len(self.classnames)
        self.outdir = args.outdir
        self.write_network_input = args.write_network_input

        self.shuffle_data = args.shuffle_data

        if self.img_extension in ('.jpg', '.JPEG'):
            self.parse_function = parse_jpg
        elif self.img_extension == '.png':
            self.parse_function = parse_png
        else:
            raise Exception('Image format not recognized.')

        self.data_aug_opts = args.data_aug_opts

        if self.data_aug_opts.apply_data_augmentation:
            bugs_class_id = (self.classnames.index('BUGS')
                             if 'BUGS' in self.classnames else -1)
            data_augmenter = DataAugmentation.ClassificationDataAugmentation(
                args, self.input_width, self.input_height, bugs_class_id)
            self.data_aug_func = data_augmenter.data_augmenter
        return
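
parse_jpg and parse_png, referenced above, are not defined on this page. A minimal sketch of both, assuming each one maps a file path and label to a decoded image tensor inside a tf.data pipeline (the exact signature is a guess based on how self.parse_function is used):

def parse_jpg(filepath, label):
    # Read and decode a JPEG image; channels=3 forces RGB output.
    raw = tf.read_file(filepath)
    image = tf.image.decode_jpeg(raw, channels=3)
    return tf.cast(image, tf.float32), label

def parse_png(filepath, label):
    # Same as parse_jpg, but for PNG-encoded images.
    raw = tf.read_file(filepath)
    image = tf.image.decode_png(raw, channels=3)
    return tf.cast(image, tf.float32), label
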
Example no. 7
def speed_test_8():
    dataset_dir = os.path.join(os.path.dirname(tools.get_base_dir()),
                               'datasets', 'coco-animals')
    img_extension, classnames = tools.process_dataset_config(
        os.path.join(dataset_dir, 'dataset_info.xml'))
    nclasses = len(classnames)
    labels_file = os.path.join(dataset_dir, 'train_labels.txt')
    filenames, labels = read_paths_and_labels(labels_file, dataset_dir)
    batched_dataset = build_dataset(filenames, labels)
    iterator = tf.data.Iterator.from_structure(batched_dataset.output_types,
                                               batched_dataset.output_shapes)
    x, y = iterator.get_next(name='iterator-output')
    train_init_op = iterator.make_initializer(batched_dataset,
                                              name='train_init_op')

    resnet_v1 = tf.contrib.slim.nets.resnet_v1
    with slim.arg_scope(
            tf.contrib.slim.python.slim.nets.resnet_utils.resnet_arg_scope()
    ):  # This arg scope is mandatory; otherwise loading the checkpoint file will fail.
        logits, _ = resnet_v1.resnet_v1_50(x,
                                           num_classes=nclasses,
                                           is_training=True,
                                           scope='resnet_v1_50')
        logits = tf.squeeze(logits, axis=[1, 2])

    loss = losses.cross_entropy(y, logits)

    optimizer = tf.train.MomentumOptimizer(learning_rate, momentum)
    update_bn_stats_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_bn_stats_ops):
        train_op = optimizer.minimize(loss, name='train_op')

    init_op = tf.global_variables_initializer()

    with tf.Session() as sess:
        sess.run(init_op)
        sess.run(train_init_op)
        for i in range(n_steps):
            ini = time.time()
            sess.run(fetches=[train_op])
            fin = time.time()
            print('Step ' + str(i) + ' done in ' + str(fin - ini) + ' s.')
Example no. 8
    def generate_graph(self):
        dirdata = os.path.join(self.opts.root_of_datasets,
                               self.opts.dataset_name)
        img_extension, self.classnames = tools.process_dataset_config(
            os.path.join(dirdata, 'dataset_info.xml'))
        self.nclasses = len(self.classnames)
        self.single_cell_arch = SingleCellArch.SingleCellArch(
            self.opts.single_cell_opts, self.nclasses, self.opts.outdir)
        self.define_inputs_and_labels()
        _, self.loss, self.metrics = self.single_cell_arch.make(
            self.inputs, self.labels, self.filenames)
        self.model_variables = [n.name for n in tf.global_variables()]
        if self.opts.l2_regularization > 0:
            self.loss += L2RegularizationLoss(self.opts)
        self.loss = tf.identity(
            self.loss, name='loss'
        )  # tf.identity here only renames the final loss tensor to 'loss'
        # Tensorboard:
        tf.summary.scalar("final_loss", self.loss)

        self.build_optimizer()

        self.define_initializer()
        self.saver = tf.train.Saver(name='net_saver', max_to_keep=1000000)
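
L2RegularizationLoss, used above when opts.l2_regularization > 0, is also not shown. A minimal sketch assuming it scales the summed L2 norms of the trainable weights by that factor (skipping bias variables is a common convention assumed here):

def L2RegularizationLoss(opts):
    weights = [v for v in tf.trainable_variables() if 'bias' not in v.name]
    # Sum of 0.5 * ||w||^2 over all weight tensors, scaled by the factor.
    return opts.l2_regularization * tf.add_n(
        [tf.nn.l2_loss(v) for v in weights])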