Example #1
    def __init__(self, log_dir, logger, enabled):
        self.writer = None
        self.selected_module = ""

        if enabled:
            log_dir = str(log_dir)

            # Retrieve the visualization writer.
            succeeded = False
            for module in ["torch.utils.tensorboard", "tensorboardX"]:
                try:
                    self.writer = importlib.import_module(module).SummaryWriter(log_dir)
                    succeeded = True
                    break
                except ImportError:
                    succeeded = False
                self.selected_module = module

            if not succeeded:
                message = "Warning: visualization (Tensorboard) is configured to use, but currently not installed on " \
                    "this machine. Please install either TensorboardX with 'pip install tensorboardx', upgrade " \
                    "PyTorch to version >= 1.1 for using 'torch.utils.tensorboard' or turn off the option in " \
                    "the 'config.json' file."
                logger.warning(message)

        self.step = 0
        self.mode = ''

        self.tb_writer_ftns = {
            'add_scalar', 'add_scalars', 'add_image', 'add_images', 'add_audio',
            'add_text', 'add_histogram', 'add_pr_curve', 'add_embedding'
        }
        self.tag_mode_exceptions = {'add_histogram', 'add_embedding'}
            
        self.timer = Timer()
Example #2
    def __init__(self, log_dir, logger, enable):
        self.log_dir = log_dir
        self.writer = None
        if enable:
            log_dir = str(log_dir)
            try:
                self.writer = importlib.import_module(
                    'tensorboardX').SummaryWriter(log_dir)
            except ImportError:
                message = (
                    "Warning: TensorboardX visualization is configured "
                    "to be used, but it is not installed on this machine. "
                    "Please install the package with 'pip install "
                    "tensorboardx' or turn off the option in the "
                    "'config.json' file.")
                logger.warning(message)
        self.step = 0
        self.mode = ''

        self.tb_writer_ftns = [
            'add_scalar', 'add_scalars', 'add_image', 'add_images',
            'add_audio', 'add_text', 'add_histogram', 'add_pr_curve',
            'add_embedding', 'add_figure'
        ]
        self.tag_mode_exceptions = ['add_histogram', 'add_embedding']
        self.timer = Timer()
Example #3
def eval_once(
    saver, ckpt_path, imdb, model, step, restore_checkpoint):

  with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
    if restore_checkpoint:
      saver.restore(sess, ckpt_path)

    uninitialized_vars = []
    for var in tf.all_variables():
        try:
            sess.run(var)
        except tf.errors.FailedPreconditionError:
            uninitialized_vars.append(var)
    init_new_vars_op = tf.initialize_variables(uninitialized_vars)
    sess.run(init_new_vars_op)

    num_images = len(imdb.image_idx)
    all_boxes = [[[] for _ in range(num_images)]
                 for _ in range(imdb.num_classes)]

    # Detection sequence, looping through all images
    _t = {'im_detect': Timer(), 'im_read': Timer(), 'misc': Timer()}
    num_detection = 0.0
    process_t = np.array([])
    for i in range(num_images):
        _t['im_read'].tic()
        images, scales = imdb.read_image_batch(shuffle=False)
        _t['im_read'].toc()
        _t['im_detect'].tic()
        t_start = process_time()  # requires: from time import process_time (Python 3.3+)
        det_boxes, det_probs, det_class = sess.run(
            [model.det_boxes, model.det_probs, model.det_class],
            feed_dict={model.image_input:images})
        t_stop = process_time()
        process_t = np.append(process_t, t_stop-t_start)
        _t['im_detect'].toc()
        _t['misc'].tic()
        for j in range(len(det_boxes)): # batch
            # rescale
            det_boxes[j, :, 0::2] /= scales[j][0]
            det_boxes[j, :, 1::2] /= scales[j][1]

            # use a distinct name: reusing det_class would clobber the
            # batched sess.run output on the next loop iteration
            det_bbox, score, det_cls = model.filter_prediction(
                det_boxes[j], det_probs[j], det_class[j])

            num_detection += len(det_bbox)
            for c, b, s in zip(det_cls, det_bbox, score):
                all_boxes[c][i].append(bbox_transform(b) + [s])
        _t['misc'].toc()

    if not os.path.exists(FLAGS.eval_dir + "/" + step):
      os.mkdir(FLAGS.eval_dir + "/" + step)

    # Save all evaluation data (context managers ensure the files are closed)
    with open(FLAGS.eval_dir + "/" + step + "/all_boxes.p", "wb") as f:
        pickle.dump(all_boxes, f)
    with open(FLAGS.eval_dir + "/" + step + "/_t.p", "wb") as f:
        pickle.dump(_t, f)
    with open(FLAGS.eval_dir + "/" + step + "/num_detection.p", "wb") as f:
        pickle.dump(num_detection, f)
    with open(FLAGS.eval_dir + "/" + step + "/process_t.p", "wb") as f:
        pickle.dump(process_t, f)
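
Timer and bbox_transform are imported from the project's utility module and are not shown in these snippets. A minimal sketch of the assumed interfaces (illustrative only; the real utilities may differ):

import time

class Timer(object):
    """Accumulating tic/toc timer matching the usage in the eval loops."""
    def __init__(self):
        self.total_time = 0.0
        self.calls = 0
        self.start_time = 0.0
        self.average_time = 0.0

    def tic(self):
        self.start_time = time.time()

    def toc(self):
        self.calls += 1
        self.total_time += time.time() - self.start_time
        self.average_time = self.total_time / self.calls

def bbox_transform(bbox):
    # [center_x, center_y, width, height] -> [xmin, ymin, xmax, ymax]
    cx, cy, w, h = bbox
    return [cx - w / 2, cy - h / 2, cx + w / 2, cy + h / 2]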
Example #4
def main():
    timer = Timer()
    config = agentConfig.get_config()
    print(config)
    server = ServerPlugin()
    data = server.check()
    print(data)
    sender = Sender(port=config['recv_port'])
    sender.emit(data)
    print("send finished, total cost time: " + str(timer.total()))
    return 0
Example #5
    def _select_with_gids(self, want_gids):
        t = Timer()
        want_gids = set(want_gids)
        graphs = [g for g in self.gs if g.gid() in want_gids]
        print('Done graphs', t.time_and_clear())
        pairs = {}
        for (gid1, gid2), pair in self.pairs.items():
            # Both g1 and g2 need to be in the (one) train/test/... set.
            if gid1 in want_gids and gid2 in want_gids:
                pairs[(gid1, gid2)] = pair
        print('Done pairs', t.time_and_clear())
        return graphs, pairs
Example #6
    def run(self):
        global monitor_data_past

        timer = Timer()

        plugins = {}
        count = {}

        for pluginName in self.basic_plugin:
            Plugin = get_plugin_class(pluginName)
            plugin = Plugin()
            plugins[pluginName] = plugin
            count[pluginName] = plugin.frequency / self.check_frequency

        for pluginName in self.get_run_plugins():
            Plugin = get_plugin_class(pluginName)
            plugin = Plugin()
            plugins[pluginName] = plugin
            count[pluginName] = plugin.frequency / self.check_frequency

        while self.run_forever:
            timer.start()

            # do checks
            for name, plugin in plugins.items():
                if count[name] != 1:
                    count[name] = count[name] - 1
                else:
                    count[name] = plugin.frequency/self.check_frequency
                    if name == 'LibvirtPlugin':
                        monitor_data_cur, data = plugin.check(monitorData=monitor_data_past)
                        monitor_data_past = monitor_data_cur
                    else:
                        data = plugin.check()
                    log.debug("check " + name + " got: " + str(data))

                    if isinstance(data, list):
                        for datanode in data:
                            self.sender.emit(datanode)
                    else:
                        self.sender.emit(data)

            cost = timer.total()
            if cost > self.check_frequency:
                log.warning("collect metrics cost time {0} is longer than check_frequency {1}".format(cost, self.check_frequency))
            else:
                log.debug("sleep {0}s for next loop".format(self.check_frequency - cost))
                time.sleep(self.check_frequency - cost)
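
The Timer used in Examples #4 and #6 exposes a different interface (start()/total()). A plausible minimal sketch, assuming total() returns the seconds elapsed since construction or the last start():

import time

class Timer(object):
    def __init__(self):
        self.start_time = time.time()

    def start(self):
        self.start_time = time.time()

    def total(self):
        # seconds elapsed since construction or the last start()
        return time.time() - self.start_time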
Example #7
def evaluate(model,
             data,
             eval_links,
             saver,
             max_num_examples=None,
             test=False):
    with torch.no_grad():
        model = model.to(FLAGS.device)
        model.eval()
        total_loss = 0
        all_pair_list = []
        iter_timer = Timer()
        eval_dataset = torch.utils.data.dataset.TensorDataset(eval_links)
        data_loader = DataLoader(eval_dataset,
                                 batch_size=FLAGS.batch_size,
                                 shuffle=True)

        for iter, batch_gids in enumerate(data_loader):
            if max_num_examples and len(all_pair_list) >= max_num_examples:
                break

            batch_gids = batch_gids[0]
            if len(batch_gids) == 0:
                continue
            batch_data = BatchData(batch_gids,
                                   data.dataset,
                                   is_train=False,
                                   unique_graphs=FLAGS.batch_unique_graphs)
            if FLAGS.lower_level_layers and FLAGS.higher_level_layers:
                if FLAGS.pair_interaction:
                    model.use_layers = 'lower_layers'
                    model(batch_data)
                model.use_layers = 'higher_layers'
            else:
                model.use_layers = 'all'
            loss = model(batch_data)

            batch_data.restore_interaction_nxgraph()
            total_loss += loss.item()
            all_pair_list.extend(batch_data.pair_list)
            if test:
                saver.log_tvt_info(
                    '\tIter: {:03d}, Test Loss: {:.7f}\t\t{}'.format(
                        iter + 1, loss, iter_timer.time_and_clear()))
    return all_pair_list, total_loss / (iter + 1)
Example #8
class WriterTensorboardX(metaclass=Singleton):
    def __init__(self, log_dir, logger, enable):
        self.log_dir = log_dir
        self.writer = None
        if enable:
            log_dir = str(log_dir)
            try:
                self.writer = importlib.import_module(
                    'tensorboardX').SummaryWriter(log_dir)
            except ImportError:
                message = (
                    "Warning: TensorboardX visualization is configured "
                    "to be used, but it is not installed on this machine. "
                    "Please install the package with 'pip install "
                    "tensorboardx' or turn off the option in the "
                    "'config.json' file.")
                logger.warning(message)
        self.step = 0
        self.mode = ''

        self.tb_writer_ftns = [
            'add_scalar', 'add_scalars', 'add_image', 'add_images',
            'add_audio', 'add_text', 'add_histogram', 'add_pr_curve',
            'add_embedding', 'add_figure'
        ]
        self.tag_mode_exceptions = ['add_histogram', 'add_embedding']
        self.timer = Timer()

    def set_step(self, step, mode='train'):
        self.mode = mode
        self.step = step
        if step == 0:
            self.timer.reset()
        else:
            duration = self.timer.check()
            self.add_scalar('steps_per_sec', 1 / duration)

    def __getattr__(self, name):
        """
        If visualization is configured to be used:
            return add_data() methods of tensorboard with additional
            information (step, tag) added.
        Otherwise:
            return a blank function handle that does nothing
        """
        if name in self.tb_writer_ftns:
            add_data = getattr(self.writer, name, None)

            def wrapper(tag, data, *args, **kwargs):
                if add_data is not None:
                    # add mode(train/valid) tag
                    if name not in self.tag_mode_exceptions:
                        tag = '{}/{}'.format(tag, self.mode)
                    add_data(tag, data, self.step, *args, **kwargs)

            return wrapper
        else:
            # default action for returning methods defined in this class,
            # set_step() for instance.
            try:
                # object has no __getattr__; use __getattribute__ for the
                # default lookup of attributes defined on this class
                attr = object.__getattribute__(self, name)
            except AttributeError:
                raise AttributeError(
                    "type object 'WriterTensorboardX' has no attribute '{}'".
                    format(name))
            return attr
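
A minimal usage sketch for the wrapper above (assuming the Timer helper it references and a standard library logger; the surrounding project normally wires these up through its config):

import logging

logger = logging.getLogger(__name__)
writer = WriterTensorboardX('saved/log', logger, enable=True)

for step in range(100):
    loss = 1.0 / (step + 1)              # dummy metric
    writer.set_step(step, mode='train')
    writer.add_scalar('loss', loss)      # recorded under tag 'loss/train' at this step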
Example #9
def eval_once(saver, ckpt_path, summary_writer, eval_summary_ops,
              eval_summary_phs, imdb, model):

    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:

        # Restores from checkpoint
        saver.restore(sess, ckpt_path)
        # Assuming model_checkpoint_path looks something like:
        #   /ckpt_dir/model.ckpt-0,
        # extract global_step from it.
        global_step = ckpt_path.split('/')[-1].split('-')[-1]
        #(7,-9,-1)0.000
        #(7,-15,-2)0.743
        #(7,-16,-2)0.800
        #(7,-16,-1)0.891
        #(7,-15,-1)0.788
        #(7,-14,-1)0.450
        #(7,-15,0)0.774
        #new
        #(7,-7, 0)0.457
        #(7,-9,-1)0.881
        #(7,-8,-1)0.669
        #(7,-9,-2)0.778
        #(7,-10,-3)0.010
        #fix_ops = f2p.float2pow2_offline(7, -9, -1, "./data/weights", sess, resave=True, convert=False, trt=True)
        # sess.run(tf.initialize_variables([fix_ops]))
        # sess.run(tf.initialize_variables(tf.trainable_variables()))
        # sess.run(tf.initialize_variables(tf.all_variables()))
        #sess.run(fix_ops)
        #exit()

        # for x in tf.trainable_variables():
        #   print (x.eval(session=sess))
        # save_data(sess, "../data/fixedweight")

        num_images = len(imdb.image_idx)

        all_boxes = [[[] for _ in xrange(num_images)]
                     for _ in xrange(imdb.num_classes)]

        _t = {'im_detect': Timer(), 'im_read': Timer(), 'misc': Timer()}

        #np.set_printoptions(threshold='nan')
        #probs_file = open('probs.txt', 'w')
        #boxes_file = open('boxes.txt', 'w')
        #class_file = open('class.txt', 'w')
        # preds_file = open('preds.txt', 'w')
        #conf_file = open('conf.txt', 'w')

        num_detection = 0.0
        for i in xrange(num_images):
            _t['im_read'].tic()
            images, scales = imdb.read_image_batch(shuffle=False)
            _t['im_read'].toc()

            _t['im_detect'].tic()
            # det_boxes, det_probs, det_class = sess.run(
            #      [model.det_boxes, model.det_probs, model.det_class],
            #      feed_dict={model.image_input:images})

            # sess = tf_debug.LocalCLIDebugWrapperSession(sess)
            # sess.add_tensor_filter("has_inf_or_nan", tf_debug.has_inf_or_nan)
            det_boxes, det_probs, det_class, det_conf, preds = sess.run(
                [
                    model.det_boxes, model.det_probs, model.det_class,
                    model.pred_conf, model.preds
                ],
                feed_dict={model.image_input: images})
            ################ save feature map #######################
            image_input, conv1, pool1, stage_1_1, left_a, left_b, right_a, right_b, right_c = sess.run(
                [
                    model.image_input, model.conv1, model.pool1,
                    model.stage_1_1, model.left_a, model.left_b, model.right_a,
                    model.right_b, model.right_c
                ],
                feed_dict={model.image_input: images})
            _t['im_detect'].toc()
            ##############print the feature map##############
            # print('left_a  is:'+str(left_a))
            # print('left_b  is:'+str(left_b))
            # print('right_a  is:'+str(right_a))
            # print('right_b  is:'+str(right_b))
            # print('right_c  is:'+str(right_c))

            LocalPath = os.getcwd()
            save_path = LocalPath + '/data0119/'
            if not os.path.exists(save_path):
                os.mkdir(save_path)
            conv1_ratio = save_dirct(np.ravel(conv1), save_path, 'conv1')
            pool1_ratio = save_dirct(np.ravel(pool1), save_path, 'pool1')
            left_a_ratio = save_dirct(np.ravel(left_a), save_path, 'left_a')
            left_b_ratio = save_dirct(np.ravel(left_b), save_path, 'left_b')
            right_a_ratio = save_dirct(np.ravel(right_a), save_path, 'right_a')
            right_b_ratio = save_dirct(np.ravel(right_b), save_path, 'right_b')
            right_c_ratio = save_dirct(np.ravel(right_c), save_path, 'right_c')

            ratio_list = [
                conv1_ratio, pool1_ratio, left_a_ratio, left_b_ratio,
                right_a_ratio, right_b_ratio, right_c_ratio
            ]
            ratio_file = open(save_path + 'ratio.txt', 'w+')
            # iterate over the values directly; reusing 'i' here would
            # clobber the image index of the enclosing loop
            for ratio in ratio_list:
                ratio_file.write(str(ratio) + '\n')
            ratio_file.close()
            ############### save feature map over ################

            ############### read ckpt file #######################
            layer_list = ['conv1', 'pool1', 'stage_1_1']
            ckpt_path = LocalPath + '/log/train_original/model.ckpt-0'

            ############### read ckpt file over ##################

            # det_boxes, det_probs, det_class, preds = sess.run(
            #     [model.det_boxes, model.det_probs, model.det_class, model.preds],
            #     feed_dict={model.image_input:images})
            #probs_file.write(str(det_probs))
            #boxes_file.write(str(det_boxes))
            #class_file.write(str(det_class))
            # preds_file.write(str(preds))
            #conf_file.write(str(det_conf))
            #probs_file.close()
            #boxes_file.close()
            #class_file.close()
            # preds_file.close()
            #conf_file.close()
            #exit()
            # print("the shape of fearue is:"+str(np.shape(conv1)))
            # draw_featuremap('conv1',conv1)
            # conv1 = conv1.tolist()
            # np.savetxt('helloworld.txt',conv1)
            # print(pool1)
            # print(conv_final)
            _t['misc'].tic()
            for j in range(len(det_boxes)):  # batch
                # rescale
                det_boxes[j, :, 0::2] /= scales[j][0]
                det_boxes[j, :, 1::2] /= scales[j][1]

                # det_bbox, score, det_class = model.filter_prediction(
                #     det_boxes[j], det_probs[j], det_class[j])
                # use a distinct name to avoid clobbering the batched det_class
                det_bbox, score, det_cls = model.dac_filter_prediction(
                    det_boxes[j], det_probs[j], det_class[j], det_conf[j])

                num_detection += len(det_bbox)
                for c, b, s in zip(det_cls, det_bbox, score):
                    all_boxes[c][i].append(bbox_transform(b) + [s])
            _t['misc'].toc()
            # probs_file.write(str(score))
            # boxes_file.write(str(det_bbox))
            # class_file.write(str(det_class))
            # # preds_file.write(str(preds))
            # # conf_file.write(str(det_conf))
            # probs_file.close()
            # boxes_file.close()
            # class_file.close()
            # exit()
            # print(preds)
            print('im_detect: {:d}/{:d} im_read: {:.3f}s '
                  'detect: {:.3f}s misc: {:.3f}s'.format(
                      i + 1, num_images, _t['im_read'].average_time,
                      _t['im_detect'].average_time, _t['misc'].average_time))

        print('Evaluating detections...')
        aps, ap_names = imdb.evaluate_detections(FLAGS.eval_dir, global_step,
                                                 all_boxes)

        print('Evaluation summary:')
        print('  Average number of detections per image: {}:'.format(
            num_detection / num_images))
        print('  Timing:')
        print('    im_read: {:.3f}s detect: {:.3f}s misc: {:.3f}s'.format(
            _t['im_read'].average_time, _t['im_detect'].average_time,
            _t['misc'].average_time))
        print('  Average precisions:')

        feed_dict = {}
        for cls, ap in zip(ap_names, aps):
            feed_dict[eval_summary_phs['APs/' + cls]] = ap
            print('    {}: {:.3f}'.format(cls, ap))

        print('    Mean average precision: {:.3f}'.format(np.mean(aps)))
        feed_dict[eval_summary_phs['APs/mAP']] = np.mean(aps)
        feed_dict[eval_summary_phs['timing/im_detect']] = \
            _t['im_detect'].average_time
        feed_dict[eval_summary_phs['timing/im_read']] = \
            _t['im_read'].average_time
        feed_dict[eval_summary_phs['timing/post_proc']] = \
            _t['misc'].average_time
        feed_dict[eval_summary_phs['num_det_per_image']] = \
            num_detection/num_images

        print('Analyzing detections...')
        stats, ims = imdb.do_detection_analysis_in_eval(
            FLAGS.eval_dir, global_step)

        eval_summary_str = sess.run(eval_summary_ops, feed_dict=feed_dict)
        for sum_str in eval_summary_str:
            summary_writer.add_summary(sum_str, global_step)
Example #10
def eval_once(saver, summary_writer, imdb, model, mc):

    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:

        # Initialize
        init = tf.global_variables_initializer()
        sess.run(init)

        #global_step = '0'
        global_step = None

        n_imgs = len(imdb.image_idx)
        n_iters = int(n_imgs / mc.BATCH_SIZE) + 1

        all_boxes = [[[] for _ in xrange(n_imgs)]
                     for _ in xrange(imdb.num_classes)]

        _t = {'im_detect': Timer(), 'im_read': Timer(), 'misc': Timer()}

        num_detection = 0.0
        for i in xrange(n_iters):
            _t['im_read'].tic()
            images, scales = imdb.read_image_batch(shuffle=False)
            _t['im_read'].toc()

            _t['im_detect'].tic()
            # TODO(jeff): remove output other than det_boxes, det_probs, det_class
            det_boxes, det_probs, det_class, probs, confs, \
              conv13, reorg20, concat20 = sess.run(
                [
                  model.det_boxes, model.det_probs, model.det_class,
                  model.probs, model.pred_conf,
                  model.conv13, model.reorg20, model.concat20
                ],
                feed_dict={model.image_input:images, \
                             model.is_training: False, model.keep_prob: 1.0}
              )
            _t['im_detect'].toc()

            _t['misc'].tic()
            for j in range(len(det_boxes)):  # batch
                # rescale
                det_boxes[j, :, 0::2] /= scales[j][0]
                det_boxes[j, :, 1::2] /= scales[j][1]

                det_bbox, score, det_class = model.filter_yolo_predict(
                    det_boxes[j], det_probs[j], det_class[j])

                num_detection += len(det_bbox)
                for c, b, s in zip(det_class, det_bbox, score):
                    all_boxes[c][i].append(bbox_transform(b) + [s])
            _t['misc'].toc()

            print('im_detect: {:d}/{:d} im_read: {:.3f}s '
                  'detect: {:.3f}s misc: {:.3f}s'.format(
                      i + 1, n_iters, _t['im_read'].average_time,
                      _t['im_detect'].average_time, _t['misc'].average_time))

        print('Evaluating detections...')
        aps, ap_names = imdb.evaluate_detections(FLAGS.eval_dir, global_step,
                                                 all_boxes)

        print('Evaluation summary:')
        print('  Average number of detections per image: {}:'.format(
            num_detection / n_imgs))
        print('  Timing:')
        print('    im_read: {:.3f}s detect: {:.3f}s misc: {:.3f}s'.format(
            _t['im_read'].average_time, _t['im_detect'].average_time,
            _t['misc'].average_time))
        print('  Average precisions:')

        eval_summary_ops = []
        for cls, ap in zip(ap_names, aps):
            eval_summary_ops.append(tf.summary.scalar('APs/' + cls, ap))
            print('    {}: {:.3f}'.format(cls, ap))
        print('    Mean average precision: {:.3f}'.format(np.mean(aps)))
        eval_summary_ops.append(tf.summary.scalar('APs/mAP', np.mean(aps)))
        eval_summary_ops.append(
            tf.summary.scalar('timing/image_detect',
                              _t['im_detect'].average_time))
        eval_summary_ops.append(
            tf.summary.scalar('timing/image_read', _t['im_read'].average_time))
        eval_summary_ops.append(
            tf.summary.scalar('timing/post_process', _t['misc'].average_time))
        eval_summary_ops.append(
            tf.summary.scalar('num_detections_per_image',
                              num_detection / n_imgs))

        print('Analyzing detections...')
        stats, ims = imdb.do_detection_analysis_in_eval(
            FLAGS.eval_dir, global_step)
        for k, v in stats.items():
            eval_summary_ops.append(
                tf.summary.scalar('Detection Analysis/' + k, v))

        eval_summary_str = sess.run(eval_summary_ops)
        for sum_str in eval_summary_str:
            summary_writer.add_summary(sum_str, global_step)
Example #11
def eval_once(saver, ckpt_path, summary_writer, imdb, model):
    gpu_config = tf.ConfigProto(allow_soft_placement=True)
    gpu_config.gpu_options.allow_growth = True
    with tf.Session(config=gpu_config) as sess:
        # Restores from checkpoint
        saver.restore(sess, ckpt_path)
        # Assuming model_checkpoint_path looks something like:
        #   /ckpt_dir/model.ckpt-0,
        # extract global_step from it.
        print(ckpt_path + '!!!\n')
        global_step = ckpt_path.split('/')[-1].split('-')[-1]
        num_images = len(imdb.image_idx)
        all_boxes = [[[] for _ in xrange(num_images)]
                     for _ in xrange(imdb.num_classes)
                     ]  # this is an empty list of list

        _t = {'im_detect': Timer(), 'im_read': Timer(), 'misc': Timer()}
        num_detection = 0.0
        gt_bboxes = imdb._rois
        perm_idx = imdb._perm_idx
        num_objs = 0
        num_dets = 0
        num_repeated_error = 0
        num_loc_error = 0
        num_cls_error = 0
        num_bg_error = 0
        num_detected_obj = 0
        num_missed_error = 0
        num_correct = 0

        for i in xrange(int(num_images / imdb.mc.BATCH_SIZE)):
            #_t['im_read'].tic()
            images, scales = imdb.read_image_batch(shuffle=True)
            #_t['im_read'].toc()
            #_t['im_detect'].tic()
            det_boxes, det_probs, det_class = sess.run(
                [model.det_boxes, model.det_probs, model.det_class],
                feed_dict={model.image_input: images})
            #_t['im_detect'].toc()
            #_t['misc'].tic()
            for j in range(len(det_boxes)):  # batch
                det_bbox, score, det_cls = model.filter_prediction(
                    det_boxes[j], det_probs[j], det_class[j])
                images[j] = _draw_box(
                    images[j] + imdb.mc.IMG_MEANS, det_bbox,
                    [model.mc.CLASS_NAMES[idx] + ': (%.2f)' % prob
                     for idx, prob in zip(det_cls, score)], (0, 0, 255))

                num_detection += len(det_bbox)
                #for c, b, s in zip(det_cls, det_bbox, score):
                #  all_boxes[c][i].append(bbox_transform(b) + [s])
                gt_bbox = np.array(
                    gt_bboxes[perm_idx[i * imdb.mc.BATCH_SIZE + j]])
                #gt_bboxes = np.array(gt_bboxes)
                gt_bbox[:, 0:4:2] *= scales[j][0]
                gt_bbox[:, 1::2] *= scales[j][1]
                if len(gt_bbox) >= 1:
                    (per_img_num_objs, per_img_num_dets, per_img_num_repeated_error,
                     per_img_num_loc_error, per_img_num_cls_error,
                     per_img_num_bg_error, per_img_num_detected_obj,
                     per_img_num_missed_error, per_img_num_correct) = _analyse_det(
                         gt_bbox, zip(det_bbox, det_cls))
                    num_objs += per_img_num_objs
                    num_dets += per_img_num_dets
                    num_repeated_error += per_img_num_repeated_error
                    num_loc_error += per_img_num_loc_error
                    num_cls_error += per_img_num_cls_error
                    num_bg_error += per_img_num_bg_error
                    num_detected_obj += per_img_num_detected_obj
                    num_missed_error += per_img_num_missed_error
                    num_correct += per_img_num_correct
        viz_image_per_batch = bgr_to_rgb(images)
        viz_summary = sess.run(
            model.viz_op, feed_dict={model.image_to_show: viz_image_per_batch})
        summary_writer.add_summary(viz_summary, global_step)
        summary_writer.flush()
        print('Detection Analysis:')
        print('    Number of detections: {}'.format(num_dets))
        print('    Number of objects: {}'.format(num_objs))
        print('    Percentage of correct detections: {}'.format(
            num_correct / (num_dets + sys.float_info.epsilon)))
        print('    Percentage of localization error: {}'.format(
            num_loc_error / (num_dets + sys.float_info.epsilon)))
        print('    Percentage of classification error: {}'.format(
            num_cls_error / (num_dets + sys.float_info.epsilon)))
        print('    Percentage of background error: {}'.format(
            num_bg_error / (num_dets + sys.float_info.epsilon)))
        print('    Percentage of repeated detections: {}'.format(
            num_repeated_error / (num_dets + sys.float_info.epsilon)))
        print('    Recall: {}'.format(num_detected_obj / num_objs))
Example #12
def eval_checkpoint(model, imdb, saver, summary_writer, test_dir,
                    checkpoint_path, eval_summary_phs, eval_summary_ops):
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.05)
    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
                                          gpu_options=gpu_options)) as sess:
        global_step = checkpoint_path.split('/')[-1].split('-')[-1]

        if os.path.exists(
                os.path.join(test_dir, 'detection_files_' + str(global_step))):
            print('Already evaluated')
            return
        saver.restore(sess, checkpoint_path)

        num_images = len(imdb.image_idx)

        all_boxes = [[[] for _ in xrange(num_images)]
                     for _ in xrange(imdb.num_classes)]

        _t = {'im_detect': Timer(), 'im_read': Timer(), 'misc': Timer()}
        num_detection = 0.0
        for i in xrange(num_images):
            _t['im_read'].tic()
            images, scales = imdb.read_image_batch(shuffle=False)
            _t['im_read'].toc()

            _t['im_detect'].tic()
            det_boxes, det_probs, det_class = sess.run(
                [model.det_boxes, model.det_probs, model.det_class],
                feed_dict={model.image_input: images})
            _t['im_detect'].toc()

            _t['misc'].tic()
            for j in xrange(len(det_boxes)):  # batch
                # rescale
                det_boxes[j, :, 0::2] /= scales[j][0]
                det_boxes[j, :, 1::2] /= scales[j][1]

                # use a distinct name to avoid clobbering the batched det_class
                det_bbox, score, det_cls = model.filter_prediction(
                    det_boxes[j], det_probs[j], det_class[j])
                num_detection += len(det_bbox)
                for c, b, s in zip(det_cls, det_bbox, score):
                    all_boxes[c][i].append(bbox_transform(b) + [s])
            _t['misc'].toc()

            print('im_detect: %s/%s im_read: %.3fs detect: %.3fs misc: %.3fs' %
                  (i + 1, num_images, _t['im_read'].average_time,
                   _t['im_detect'].average_time, _t['misc'].average_time))

        print('Evaluating detections...')
        aps, ap_names = imdb.evaluate_detections(test_dir, global_step,
                                                 all_boxes)

        print('Evaluation summary:')
        print('  Average number of detections per image: %s:' %
              (num_detection / num_images))
        print('  Timing:')
        print('    im_read: %.3fs detect: %.3fs misc: %.3fs' %
              (_t['im_read'].average_time, _t['im_detect'].average_time,
               _t['misc'].average_time))
        print('  Average precisions:')
        feed_dict = {}
        for cls, ap in zip(ap_names, aps):
            feed_dict[eval_summary_phs['APs/' + cls]] = ap
            print('    %s: %.3f' % (cls, ap))

        print('    Mean average precision: %.3f' % np.mean(aps))
        feed_dict[eval_summary_phs['APs/mAP']] = np.mean(aps)
        feed_dict[eval_summary_phs['timing/im_detect']] = _t[
            'im_detect'].average_time
        feed_dict[
            eval_summary_phs['timing/im_read']] = _t['im_read'].average_time
        feed_dict[
            eval_summary_phs['timing/post_proc']] = _t['misc'].average_time
        feed_dict[
            eval_summary_phs['num_det_per_image']] = num_detection / num_images

        print('Analyzing detections...')
        stats, ims = imdb.do_detection_analysis_in_eval(test_dir, global_step)

        eval_summary_str = sess.run(eval_summary_ops, feed_dict=feed_dict)
        for sum_str in eval_summary_str:
            summary_writer.add_summary(sum_str, global_step)
Example #13
def _train(num_iters_total,
           train_data,
           val_data,
           train_val_links,
           model,
           optimizer,
           saver,
           fold_num,
           retry_num=0):
    fold_str = '' if fold_num is None else 'Fold_{}_'.format(fold_num)
    if retry_num > 0:
        fold_str += 'retry_{}_'.format(retry_num)
    epoch_timer = Timer()
    total_loss = 0
    curr_num_iters = 0
    val_results = {}
    if FLAGS.sampler == "neighbor_sampler":
        sampler = NeighborSampler(train_data, FLAGS.num_neighbors_sample,
                                  FLAGS.batch_size)
        estimated_iters_per_epoch = ceil(
            (len(train_data.dataset.gs_map) / FLAGS.batch_size))
    elif FLAGS.sampler == "random_sampler":
        sampler = RandomSampler(train_data, FLAGS.batch_size,
                                FLAGS.sample_induced)
        estimated_iters_per_epoch = ceil(
            (len(train_data.dataset.train_pairs) / FLAGS.batch_size))
    else:
        sampler = EverythingSampler(train_data)
        estimated_iters_per_epoch = 1

    moving_avg = MovingAverage(FLAGS.validation_window_size)
    iters_per_validation = FLAGS.iters_per_validation \
        if FLAGS.iters_per_validation != -1 else estimated_iters_per_epoch

    for iter in range(FLAGS.num_iters):
        model.train()
        model.zero_grad()
        batch_data = model_forward(model, train_data, sampler=sampler)
        loss = _train_iter(batch_data, model, optimizer)
        batch_data.restore_interaction_nxgraph()
        total_loss += loss
        num_iters_total_limit = FLAGS.num_iters
        curr_num_iters += 1
        if num_iters_total_limit is not None and \
                num_iters_total == num_iters_total_limit:
            break
        if iter % FLAGS.print_every_iters == 0:
            saver.log_tvt_info("{}Iter {:04d}, Loss: {:.7f}".format(
                fold_str, iter + 1, loss))
            if COMET_EXPERIMENT:
                COMET_EXPERIMENT.log_metric("{}loss".format(fold_str), loss,
                                            iter + 1)
        if (iter + 1) % iters_per_validation == 0:
            eval_res, supplement = validation(
                model,
                val_data,
                train_val_links,
                saver,
                max_num_examples=FLAGS.max_eval_pairs)
            epoch = iter / estimated_iters_per_epoch
            saver.log_tvt_info('{}Estimated Epoch: {:05f}, Loss: {:.7f} '
                               '({} iters)\t\t{}\n Val Result: {}'.format(
                                   fold_str, epoch,
                                   eval_res["Loss"], curr_num_iters,
                                   epoch_timer.time_and_clear(), eval_res))
            if COMET_EXPERIMENT:
                COMET_EXPERIMENT.log_metrics(
                    eval_res,
                    prefix="{}validation".format(fold_str),
                    step=iter + 1)
                COMET_EXPERIMENT.log_histogram_3d(
                    supplement['y_pred'],
                    name="{}y_pred".format(fold_str),
                    step=iter + 1)
                COMET_EXPERIMENT.log_histogram_3d(
                    supplement['y_true'],
                    name='{}y_true'.format(fold_str),
                    step=iter + 1)
                confusion_matrix = supplement.get('confusion_matrix')
                if confusion_matrix is not None:
                    labels = [
                        k for k, v in sorted(
                            batch_data.dataset.interaction_edge_labels.items(),
                            key=lambda item: item[1])
                    ]
                    COMET_EXPERIMENT.log_confusion_matrix(
                        matrix=confusion_matrix, labels=labels, step=iter + 1)
            curr_num_iters = 0
            val_results[iter + 1] = eval_res
            if len(moving_avg.results) == 0 or (
                    eval_res[FLAGS.validation_metric] - 1e-7) > max(
                        moving_avg.results):
                saver.save_trained_model(model, iter + 1)
            moving_avg.add_to_moving_avg(eval_res[FLAGS.validation_metric])
            if moving_avg.stop():
                break
    return val_results
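
MovingAverage is not defined in this snippet. A minimal sketch consistent with how it is used above (a results list, add_to_moving_avg(), stop()), assuming stop() fires once the windowed average no longer improves:

class MovingAverage(object):
    def __init__(self, window_size):
        self.window_size = window_size
        self.results = []

    def add_to_moving_avg(self, x):
        self.results.append(x)

    def stop(self):
        # Not enough history for two full windows yet.
        if len(self.results) < 2 * self.window_size:
            return False
        curr = self.results[-self.window_size:]
        prev = self.results[-2 * self.window_size:-self.window_size]
        return sum(curr) / self.window_size <= sum(prev) / self.window_size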
Example #14
def eval_once(saver, ckpt_path, summary_writer, eval_summary_ops,
              eval_summary_phs, imdb, model):

    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:

        # Restores from checkpoint
        saver.restore(sess, ckpt_path)

        # If we are applying simulated quantization
        if FLAGS.use_quantization:

            # Assertions for validity of quantization arguments
            assert FLAGS.rounding_method != 'none', \
                    "Must specify rounding method (nearest_neighbor or stochastic)"
            assert FLAGS.model_bits != 0, \
                    "Must specify non-zero number of model bits"
            #assert FLAGS.activation_bits != 0, \
            #        "Must specify non-zero number of activation bits"

            # Extract parameter references for quantization
            all_vars = ops.get_collection_ref(
                ops.GraphKeys.TRAINABLE_VARIABLES)

            # Get global minimums and maximums for weights (kernels) and biases
            global_min = float('inf')
            global_max = 0.0
            global_weight_min = float('inf')
            global_weight_max = 0.0
            global_bias_min = float('inf')
            global_bias_max = 0.0

            global_1x1_min = float('inf')
            global_1x1_max = 0.0
            global_3x3_min = float('inf')
            global_3x3_max = 0.0
            global_conv_min = float('inf')
            global_conv_max = 0.0

            global_1x1_weight_min = float('inf')
            global_1x1_weight_max = 0.0
            global_3x3_weight_min = float('inf')
            global_3x3_weight_max = 0.0
            global_conv_weight_min = float('inf')
            global_conv_weight_max = 0.0

            global_1x1_bias_min = float('inf')
            global_1x1_bias_max = 0.0
            global_3x3_bias_min = float('inf')
            global_3x3_bias_max = 0.0
            global_conv_bias_min = float('inf')
            global_conv_bias_max = 0.0

            for i in range(len(all_vars)):
                print(all_vars[i].name)
                tensor = sess.run(all_vars[i])
                tensor_min = np.amin(tensor)
                tensor_max = np.amax(tensor)
                # Update global range
                if tensor_min < global_min:
                    global_min = tensor_min
                    #print('new_min: '+str(global_min))
                if tensor_max > global_max:
                    global_max = tensor_max
                # Update kernel and bias ranges
                if ('kernels' in all_vars[i].name):
                    if tensor_min < global_weight_min:
                        global_weight_min = tensor_min
                        #print('new_kernel_min: '+str(global_weight_min))
                    if tensor_max > global_weight_max:
                        global_weight_max = tensor_max
                    # Update 1x1 and 3x3 ranges
                    if ('1x1' in all_vars[i].name):
                        if tensor_min < global_1x1_weight_min:
                            global_1x1_weight_min = tensor_min
                            #print('new_1x1_kernel_min: '+str(global_1x1_weight_min))
                        if tensor_max > global_1x1_weight_max:
                            global_1x1_weight_max = tensor_max
                    if ('3x3' in all_vars[i].name):
                        if tensor_min < global_3x3_weight_min:
                            global_3x3_weight_min = tensor_min
                            #print('new_3x3_kernel_min: '+str(global_3x3_weight_min))
                        if tensor_max > global_3x3_weight_max:
                            global_3x3_weight_max = tensor_max
                    if ('conv' in all_vars[i].name):
                        if tensor_min < global_conv_weight_min:
                            global_conv_weight_min = tensor_min
                            #print('new_conv_kernel_min: '+str(global_conv_weight_min))
                        if tensor_max > global_conv_weight_max:
                            global_conv_weight_max = tensor_max

                if ('biases' in all_vars[i].name):
                    if tensor_min < global_bias_min:
                        global_bias_min = tensor_min
                        #print('new_bias_min: '+str(global_bias_min))
                    if tensor_max > global_bias_max:
                        global_bias_max = tensor_max
                    # Update 1x1 and 3x3 ranges
                    if ('1x1' in all_vars[i].name):
                        if tensor_min < global_1x1_bias_min:
                            global_1x1_bias_min = tensor_min
                            #print('new_1x1_bias_min: '+str(global_1x1_bias_min))
                        if tensor_max > global_1x1_bias_max:
                            global_1x1_bias_max = tensor_max
                    if ('3x3' in all_vars[i].name):
                        if tensor_min < global_3x3_bias_min:
                            global_3x3_bias_min = tensor_min
                            #print('new_3x3_bias_min: '+str(global_3x3_bias_min))
                        if tensor_max > global_3x3_bias_max:
                            global_3x3_bias_max = tensor_max
                    if ('conv' in all_vars[i].name):
                        if tensor_min < global_conv_bias_min:
                            global_conv_bias_min = tensor_min
                            #print('new_conv_bias_min: '+str(global_conv_bias_min))
                        if tensor_max > global_conv_bias_max:
                            global_conv_bias_max = tensor_max

                # Update 1x1, 3x3, and conv ranges
                if ('1x1' in all_vars[i].name):
                    if tensor_min < global_1x1_min:
                        global_1x1_min = tensor_min
                    if tensor_max > global_1x1_max:
                        global_1x1_max = tensor_max
                if ('3x3' in all_vars[i].name):
                    if tensor_min < global_3x3_min:
                        global_3x3_min = tensor_min
                    if tensor_max > global_3x3_max:
                        global_3x3_max = tensor_max
                if ('conv' in all_vars[i].name):
                    if tensor_min < global_conv_min:
                        global_conv_min = tensor_min
                    if tensor_max > global_conv_max:
                        global_conv_max = tensor_max

            print('---')
            print('global spread:')
            print(global_max, global_min)
            print('global weight spread:')
            print(global_weight_max, global_weight_min)
            print('global bias spread:')
            print(global_bias_max, global_bias_min)
            print('---')
            print('global 1x1 spread:')
            print(global_1x1_max, global_1x1_min)
            print('global 1x1 weight spread:')
            print(global_1x1_weight_max, global_1x1_weight_min)
            print('global 1x1 bias spread:')
            print(global_1x1_bias_max, global_1x1_bias_min)
            print('---')
            print('global 3x3 spread:')
            print(global_3x3_max, global_3x3_min)
            print('global 3x3 weight spread:')
            print(global_3x3_weight_max, global_3x3_weight_min)
            print('global 3x3 bias spread:')
            print(global_3x3_bias_max, global_3x3_bias_min)
            print('---')
            print('global conv spread:')
            print(global_conv_max, global_conv_min)
            print('global conv weight spread:')
            print(global_conv_weight_max, global_conv_weight_min)
            print('global conv bias spread:')
            print(global_conv_bias_max, global_conv_bias_min)

            # For each set of parameters
            for i in range(len(all_vars)):
                print(all_vars[i].name)

                # Load the data into a numpy array for easy manipulation
                tensor = sess.run(all_vars[i])

                # If conv and fire layers are to be scaled separately
                if FLAGS.separate_layer_scales:
                    if ('conv' in all_vars[i].name):
                        min_quant_val = global_conv_min
                        max_quant_val = global_conv_max
                    elif ('fire' in all_vars[i].name):
                        min_quant_val = min(global_1x1_min, global_3x3_min)
                        max_quant_val = max(global_1x1_max, global_3x3_max)
                    else:
                        print(
                            "Error: Only conv, 3x3, and 1x1 currently supported"
                        )
                        exit()
                else:
                    min_quant_val = global_min
                    max_quant_val = global_max

                # Get the set of values for quantization
                quant_val_arr = \
                    get_quant_val_array_from_minmax(min_quant_val,
                                                    max_quant_val,
                                                    FLAGS.model_bits,
                                                    FLAGS.reserve_zero_val)

                # Loop over the whole tensor
                if 'biases' in all_vars[i].name:
                    for idx0 in range(tensor.shape[0]):
                        tensor[idx0] = round_to_quant_val(
                            quant_val_arr, tensor[idx0],
                            FLAGS.rounding_method)
                if 'kernels' in all_vars[i].name:
                    for idx0 in range(tensor.shape[0]):
                        for idx1 in range(tensor.shape[1]):
                            for idx2 in range(tensor.shape[2]):
                                for idx3 in range(tensor.shape[3]):
                                    tensor[idx0][idx1][idx2][idx3] = \
                                        round_to_quant_val(
                                            quant_val_arr,
                                            tensor[idx0][idx1][idx2][idx3],
                                            FLAGS.rounding_method)

                # Store the data back into the tensorflow variable
                test_op = tf.assign(all_vars[i], tensor)
                sess.run(test_op)
            '''
        for i in range(len(all_vars)):
            if (('kernels' in all_vars[i].name) and \
                    (not ('Momentum' in all_vars[i].name))):
                if True:
                    test_op = tf.assign(all_vars[i], \
                            tf.scalar_mul(0.90,
                            (all_vars[i])))
                    sess.run(test_op)
                    sess.run(all_vars[i])
        '''

        # Assuming model_checkpoint_path looks something like:
        #   /ckpt_dir/model.ckpt-0,
        # extract global_step from it.
        global_step = ckpt_path.split('/')[-1].split('-')[-1]

        num_images = len(imdb.image_idx)

        all_boxes = [[[] for _ in xrange(num_images)]
                     for _ in xrange(imdb.num_classes)]

        _t = {'im_detect': Timer(), 'im_read': Timer(), 'misc': Timer()}

        num_detection = 0.0
        for i in xrange(num_images):
            _t['im_read'].tic()
            images, scales = imdb.read_image_batch(shuffle=False)
            _t['im_read'].toc()

            _t['im_detect'].tic()
            det_boxes, det_probs, det_class = sess.run(
                [model.det_boxes, model.det_probs, model.det_class],
                feed_dict={model.image_input: images})
            _t['im_detect'].toc()

            _t['misc'].tic()
            for j in range(len(det_boxes)):  # batch
                # rescale
                det_boxes[j, :, 0::2] /= scales[j][0]
                det_boxes[j, :, 1::2] /= scales[j][1]

                # use a distinct name to avoid clobbering the batched det_class
                det_bbox, score, det_cls = model.filter_prediction(
                    det_boxes[j], det_probs[j], det_class[j])

                num_detection += len(det_bbox)
                for c, b, s in zip(det_cls, det_bbox, score):
                    all_boxes[c][i].append(bbox_transform(b) + [s])
            _t['misc'].toc()

            print('im_detect: {:d}/{:d} im_read: {:.3f}s '
                  'detect: {:.3f}s misc: {:.3f}s'.format(
                      i + 1, num_images, _t['im_read'].average_time,
                      _t['im_detect'].average_time, _t['misc'].average_time))

        print('Evaluating detections...')
        aps, ap_names = imdb.evaluate_detections(FLAGS.eval_dir, global_step,
                                                 all_boxes)

        print('Evaluation summary:')
        print('  Average number of detections per image: {}:'.format(
            num_detection / num_images))
        print('  Timing:')
        print('    im_read: {:.3f}s detect: {:.3f}s misc: {:.3f}s'.format(
            _t['im_read'].average_time, _t['im_detect'].average_time,
            _t['misc'].average_time))
        print('  Average precisions:')

        feed_dict = {}
        for cls, ap in zip(ap_names, aps):
            feed_dict[eval_summary_phs['APs/' + cls]] = ap
            print('    {}: {:.3f}'.format(cls, ap))

        print('    Mean average precision: {:.3f}'.format(np.mean(aps)))
        feed_dict[eval_summary_phs['APs/mAP']] = np.mean(aps)
        feed_dict[eval_summary_phs['timing/im_detect']] = \
            _t['im_detect'].average_time
        feed_dict[eval_summary_phs['timing/im_read']] = \
            _t['im_read'].average_time
        feed_dict[eval_summary_phs['timing/post_proc']] = \
            _t['misc'].average_time
        feed_dict[eval_summary_phs['num_det_per_image']] = \
            num_detection/num_images

        print('Analyzing detections...')
        stats, ims = imdb.do_detection_analysis_in_eval(
            FLAGS.eval_dir, global_step)

        eval_summary_str = sess.run(eval_summary_ops, feed_dict=feed_dict)
        for sum_str in eval_summary_str:
            summary_writer.add_summary(sum_str, global_step)
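
get_quant_val_array_from_minmax and round_to_quant_val are not shown in the snippet. A minimal sketch of what uniform-quantization versions might look like (illustrative; the real helpers may treat reserve_zero_val and stochastic rounding differently):

import numpy as np

def get_quant_val_array_from_minmax(min_val, max_val, num_bits, reserve_zero_val):
    # Evenly spaced quantization levels over [min_val, max_val].
    vals = np.linspace(min_val, max_val, 2 ** num_bits)
    if reserve_zero_val:
        # Snap the level closest to zero so that exact zeros survive.
        vals[np.argmin(np.abs(vals))] = 0.0
    return vals

def round_to_quant_val(quant_val_arr, value, rounding_method):
    # Map a scalar onto one of the precomputed levels.
    if rounding_method == 'nearest_neighbor':
        return quant_val_arr[np.argmin(np.abs(quant_val_arr - value))]
    if rounding_method == 'stochastic':
        idx = np.searchsorted(quant_val_arr, value)
        if idx <= 0:
            return quant_val_arr[0]
        if idx >= len(quant_val_arr):
            return quant_val_arr[-1]
        lo, hi = quant_val_arr[idx - 1], quant_val_arr[idx]
        # Round up with probability proportional to the distance from lo.
        return hi if np.random.rand() < (value - lo) / (hi - lo) else lo
    raise ValueError('unknown rounding method: {}'.format(rounding_method))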
Example #15
def eval_once(saver, ckpt_path, summary_writer, eval_summary_ops,
              eval_summary_phs, imdb, model):

    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:

        # Restores from checkpoint
        saver.restore(sess, ckpt_path)
        # Assuming model_checkpoint_path looks something like:
        #   /ckpt_dir/model.ckpt-0,
        # extract global_step from it.
        global_step = ckpt_path.split('/')[-1].split('-')[-1]

        #(7,-9,-1)0.000
        #(7,-15,-2)0.743
        #(7,-16,-2)0.800
        #(7,-16,-1)0.891
        #(7,-15,-1)0.788
        #(7,-14,-1)0.450
        #(7,-15,0)0.774
        #new
        #(7,-7, 0)0.457
        #(7,-9,-1)0.881
        #(7,-8,-1)0.669
        #(7,-9,-2)0.778
        #(7,-10,-3)0.010
        #fix_ops = f2p.float2pow2_offline(7, -9, -1, "./data/weights", sess, resave=True, convert=False, trt=True)
        # sess.run(tf.initialize_variables([fix_ops]))
        # sess.run(tf.initialize_variables(tf.trainable_variables()))
        # sess.run(tf.initialize_variables(tf.all_variables()))
        #sess.run(fix_ops)
        #exit()

        # for x in tf.trainable_variables():
        #   print (x.eval(session=sess))
        # save_data(sess, "../data/fixedweight")

        num_images = len(imdb.image_idx)

        all_boxes = [[[] for _ in xrange(num_images)]
                     for _ in xrange(imdb.num_classes)]

        _t = {'im_detect': Timer(), 'im_read': Timer(), 'misc': Timer()}

        # np.set_printoptions(threshold='nan')
        # probs_file = open('probs.txt', 'w')
        # boxes_file = open('boxes.txt', 'w')
        # class_file = open('class.txt', 'w')
        # preds_file = open('preds.txt', 'w')

        num_detection = 0.0
        for i in xrange(num_images):
            _t['im_read'].tic()
            images, scales = imdb.read_image_batch(shuffle=False)
            _t['im_read'].toc()

            _t['im_detect'].tic()
            # det_boxes, det_probs, det_class = sess.run(
            #      [model.det_boxes, model.det_probs, model.det_class],
            #      feed_dict={model.image_input:images})
            det_boxes, det_probs, det_class, det_conf = sess.run(
                [
                    model.det_boxes, model.det_probs, model.det_class,
                    model.pred_conf
                ],
                feed_dict={model.image_input: images})
            _t['im_detect'].toc()

            # det_boxes, det_probs, det_class, preds = sess.run(
            #     [model.det_boxes, model.det_probs, model.det_class, model.preds],
            #     feed_dict={model.image_input:images})
            # probs_file.write(str(det_probs))
            # boxes_file.write(str(det_boxes))
            # class_file.write(str(det_class))
            # preds_file.write(str(preds))
            # probs_file.close()
            # boxes_file.close()
            # class_file.close()
            # preds_file.close()
            # exit()
            _t['misc'].tic()

            # take the clip and save
            # NOTE: cfg, layer_list, process_fm, checkpoint_path and
            # parameter_save are assumed to be defined elsewhere in this file
            LocalPath = os.getcwd()
            save_path = LocalPath + cfg.SAVE_PATH + '/activations'
            if not os.path.exists(save_path):
                os.mkdir(save_path)
            ratio_key = open(save_path + '/ratio_key.txt', 'w+')
            ratio_value = open(save_path + '/ratio_value.txt', 'w+')
            for x in layer_list:
                process_fm(checkpoint_path, x, parameter_save)
            ratio_key.close()
            ratio_value.close()
            ################# over ##################
            for j in range(len(det_boxes)):  # batch
                # rescale
                det_boxes[j, :, 0::2] /= scales[j][0]
                det_boxes[j, :, 1::2] /= scales[j][1]

                # det_bbox, score, det_class = model.filter_prediction(
                #     det_boxes[j], det_probs[j], det_class[j])
                # use a distinct name to avoid clobbering the batched det_class
                det_bbox, score, det_cls = model.dac_filter_prediction(
                    det_boxes[j], det_probs[j], det_class[j], det_conf[j])

                num_detection += len(det_bbox)
                for c, b, s in zip(det_cls, det_bbox, score):
                    all_boxes[c][i].append(bbox_transform(b) + [s])
            _t['misc'].toc()

            print('im_detect: {:d}/{:d} im_read: {:.3f}s '
                  'detect: {:.3f}s misc: {:.3f}s'.format(
                      i + 1, num_images, _t['im_read'].average_time,
                      _t['im_detect'].average_time, _t['misc'].average_time))

        print('Evaluating detections...')
        aps, ap_names = imdb.evaluate_detections(FLAGS.eval_dir, global_step,
                                                 all_boxes)

        print('Evaluation summary:')
        print('  Average number of detections per image: {}:'.format(
            num_detection / num_images))
        print('  Timing:')
        print('    im_read: {:.3f}s detect: {:.3f}s misc: {:.3f}s'.format(
            _t['im_read'].average_time, _t['im_detect'].average_time,
            _t['misc'].average_time))
        print('  Average precisions:')

        feed_dict = {}
        for cls, ap in zip(ap_names, aps):
            feed_dict[eval_summary_phs['APs/' + cls]] = ap
            print('    {}: {:.3f}'.format(cls, ap))

        print('    Mean average precision: {:.3f}'.format(np.mean(aps)))
        feed_dict[eval_summary_phs['APs/mAP']] = np.mean(aps)
        feed_dict[eval_summary_phs['timing/im_detect']] = \
            _t['im_detect'].average_time
        feed_dict[eval_summary_phs['timing/im_read']] = \
            _t['im_read'].average_time
        feed_dict[eval_summary_phs['timing/post_proc']] = \
            _t['misc'].average_time
        feed_dict[eval_summary_phs['num_det_per_image']] = \
            num_detection/num_images

        print('Analyzing detections...')
        stats, ims = imdb.do_detection_analysis_in_eval(
            FLAGS.eval_dir, global_step)

        eval_summary_str = sess.run(eval_summary_ops, feed_dict=feed_dict)
        for sum_str in eval_summary_str:
            summary_writer.add_summary(sum_str, global_step)
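
The `eval_once` variants here and in Example #16 read `eval_summary_phs` and `eval_summary_ops` without defining them; they are built once outside the function (likewise `summary_writer` and, in the variant above, `global_step`). A minimal sketch of that setup, assuming TF1-style placeholders and the summary keys used above (the per-class 'APs/' keys would be added the same way):

eval_summary_phs = {}
eval_summary_ops = []
for key in ['APs/mAP', 'timing/im_detect', 'timing/im_read',
            'timing/post_proc', 'num_det_per_image']:
    # Placeholders are fed once per evaluation pass with the final scalars.
    ph = tf.placeholder(tf.float32, name=key.replace('/', '_'))
    eval_summary_phs[key] = ph
    eval_summary_ops.append(tf.summary.scalar(key, ph))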
Example #16
def eval_once(
    saver, ckpt_path, summary_writer, eval_summary_ops, eval_summary_phs, imdb,
    model):

  gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.2)
  with tf.Session(config=tf.ConfigProto(allow_soft_placement=True, gpu_options=gpu_options)) as sess:
    # Assuming model_checkpoint_path looks something like:
    #   /ckpt_dir/model.ckpt-0,
    # extract global_step from it.
    global_step = ckpt_path.split('/')[-1].split('-')[-1]
    
    if os.path.exists(os.path.join(FLAGS.eval_dir, 'detection_files_' + str(global_step))):
        return
    
    # Restores from checkpoint
    saver.restore(sess, ckpt_path)

    num_images = len(imdb.image_idx)

    all_boxes = [[[] for _ in xrange(num_images)]
                 for _ in xrange(imdb.num_classes)]

    _t = {'im_detect': Timer(), 'im_read': Timer(), 'misc': Timer()}

    num_detection = 0.0
    for i in xrange(num_images):
      _t['im_read'].tic()
      images, scales = imdb.read_image_batch(shuffle=False)
      _t['im_read'].toc()

      _t['im_detect'].tic()
      det_boxes, det_probs, det_class = sess.run(
          [model.det_boxes, model.det_probs, model.det_class],
          feed_dict={model.image_input:images})
      _t['im_detect'].toc()

      _t['misc'].tic()
      for j in range(len(det_boxes)): # batch
        # rescale
        det_boxes[j, :, 0::2] /= scales[j][0]
        det_boxes[j, :, 1::2] /= scales[j][1]

        det_bbox, score, det_class = model.filter_prediction(
            det_boxes[j], det_probs[j], det_class[j])

        num_detection += len(det_bbox)
        for c, b, s in zip(det_class, det_bbox, score):
          all_boxes[c][i].append(bbox_transform(b) + [s])
      _t['misc'].toc()

      print('im_detect: {:d}/{:d} im_read: {:.3f}s '
            'detect: {:.3f}s misc: {:.3f}s'.format(
                i+1, num_images, _t['im_read'].average_time,
                _t['im_detect'].average_time, _t['misc'].average_time))

    print('Evaluating detections...')
    aps, ap_names = imdb.evaluate_detections(
        FLAGS.eval_dir, global_step, all_boxes)

    print('Evaluation summary:')
    print('  Average number of detections per image: {}'.format(
      num_detection/num_images))
    print('  Timing:')
    print('    im_read: {:.3f}s detect: {:.3f}s misc: {:.3f}s'.format(
      _t['im_read'].average_time, _t['im_detect'].average_time,
      _t['misc'].average_time))
    print('  Average precisions:')

    feed_dict = {}
    for cls, ap in zip(ap_names, aps):
      feed_dict[eval_summary_phs['APs/'+cls]] = ap
      print('    {}: {:.3f}'.format(cls, ap))

    print('    Mean average precision: {:.3f}'.format(np.mean(aps)))
    feed_dict[eval_summary_phs['APs/mAP']] = np.mean(aps)
    feed_dict[eval_summary_phs['timing/im_detect']] = \
        _t['im_detect'].average_time
    feed_dict[eval_summary_phs['timing/im_read']] = \
        _t['im_read'].average_time
    feed_dict[eval_summary_phs['timing/post_proc']] = \
        _t['misc'].average_time
    feed_dict[eval_summary_phs['num_det_per_image']] = \
        num_detection/num_images

    print('Analyzing detections...')
    stats, ims = imdb.do_detection_analysis_in_eval(
        FLAGS.eval_dir, global_step)

    eval_summary_str = sess.run(eval_summary_ops, feed_dict=feed_dict)
    for sum_str in eval_summary_str:
      summary_writer.add_summary(sum_str, global_step)
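
The `ckpt_path` argument is typically obtained by polling the checkpoint directory before each call; a sketch using the TF1 checkpoint API (the `checkpoint_dir` flag name is illustrative):

# Look up the latest checkpoint and evaluate it once.
ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
if ckpt and ckpt.model_checkpoint_path:
    eval_once(saver, ckpt.model_checkpoint_path, summary_writer,
              eval_summary_ops, eval_summary_phs, imdb, model)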
Example #17
def eval_once(saver, ckpt_path, summary_writer, imdb, model):

    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:

        # Restores from checkpoint
        saver.restore(sess, ckpt_path)
        # Assuming model_checkpoint_path looks something like:
        #   /ckpt_dir/model.ckpt-0,
        # extract global_step from it.
        global_step = ckpt_path.split('/')[-1].split('-')[-1]

        num_images = len(imdb.image_idx)

        total_acc, total_missed, total_culled = 0.0, 0.0, 0.0

        error_hist_bucket_width = 5
        error_hist = [0 for _ in range(int(100 / error_hist_bucket_width))]

        all_boxes = [[[] for _ in xrange(num_images)]
                     for _ in xrange(imdb.num_classes)]

        _t = {'im_detect': Timer(), 'im_read': Timer(), 'misc': Timer()}

        num_detection = 0.0
        acc, missed, culled = 0.0, 0.0, 0.0
        for i in xrange(num_images):
            _t['im_read'].tic()
            images, scales, gt_masks = imdb.read_image_batch(shuffle=False)
            _t['im_read'].toc()

            if FLAGS.net == 'resnet50_filter':
                _t['im_detect'].tic()
                pred_masks = sess.run([model.preds],
                                      feed_dict={
                                          model.image_input: images,
                                          model.keep_prob: 1.0
                                      })
                _t['im_detect'].toc()

                _t['misc'].tic()
                _t['misc'].toc()

                assert gt_masks[0].shape == pred_masks[0][0].shape, \
                        'Ground truth mask and predicted mask have different dimensions'

                # Metrics, normalized over the 12x39 mask grid. The float
                # denominator guards against integer division under Python 2.
                num_cells = float(12 * 39)
                acc = (abs(gt_masks[0] - pred_masks[0][0]) < 0.5).sum() / num_cells
                missed = ((gt_masks[0] - pred_masks[0][0]) > 0.9).sum() / num_cells
                culled = (pred_masks[0][0] < 0.1).sum() / num_cells

                # Update totals
                total_acc = total_acc + acc
                total_missed = total_missed + missed
                total_culled = total_culled + culled

                # Update histogram (clamp to the last bucket so missed == 1.0
                # does not index out of range)
                bucket = min(
                    len(error_hist) - 1,
                    int(np.floor(missed * 100.0 / error_hist_bucket_width)))
                error_hist[bucket] += 1

            else:
                _t['im_detect'].tic()
                det_boxes, det_probs, det_class = sess.run(
                    [model.det_boxes, model.det_probs, model.det_class],
                    feed_dict={
                        model.image_input: images,
                        model.keep_prob: 1.0
                    })
                _t['im_detect'].toc()

                _t['misc'].tic()
                for j in range(len(det_boxes)):  # batch
                    # rescale
                    det_boxes[j, :, 0::2] /= scales[j][0]
                    det_boxes[j, :, 1::2] /= scales[j][1]

                    det_bbox, score, det_class = model.filter_prediction(
                        det_boxes[j], det_probs[j], det_class[j])

                    num_detection += len(det_bbox)
                    for c, b, s in zip(det_class, det_bbox, score):
                        all_boxes[c][i].append(bbox_transform(b) + [s])
                _t['misc'].toc()

            print('im_detect: {:d}/{:d} im_read: {:.3f}s '
                  'detect: {:.3f}s misc: {:.3f}s, '
                  'accuracy: {:.2f}%, missed: {:.2f}%, culled: {:.2f}%'.format(
                      i + 1, num_images, _t['im_read'].average_time,
                      _t['im_detect'].average_time, _t['misc'].average_time,
                      acc * float(100.0), missed * float(100.0),
                      culled * float(100.0)))

        total_acc = total_acc / float(num_images)
        total_missed = total_missed / float(num_images)
        total_culled = total_culled / float(num_images)
        print('Total # images: {:d}, Avg accuracy: {:.2f}%, '
              'Avg error: {:.2f}%, Avg culling: {:.2f}%'.format(
                  num_images, total_acc * float(100.0),
                  total_missed * float(100.0), total_culled * float(100.0)))
        print('Error histogram: {0}'.format(error_hist))
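
The mask metrics above reduce a 12x39 confidence grid to three scalars. A standalone illustration with synthetic arrays (the 12x39 shape and the 0.5/0.9/0.1 thresholds are taken from the code above):

import numpy as np

gt = (np.random.rand(12, 39) > 0.5).astype(np.float32)  # binary ground-truth mask
pred = np.random.rand(12, 39).astype(np.float32)        # predicted confidences

num_cells = float(12 * 39)
acc = (np.abs(gt - pred) < 0.5).sum() / num_cells   # cells within 0.5 of the label
missed = ((gt - pred) > 0.9).sum() / num_cells      # positive cells predicted near zero
culled = (pred < 0.1).sum() / num_cells             # cells below the culling threshold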
Example #18
class TensorboardWriter:
    def __init__(self, log_dir, logger, enabled):
        self.writer = None
        self.selected_module = ""

        if enabled:
            log_dir = str(log_dir)

            # Retrieve the visualization writer.
            succeeded = False
            for module in ["torch.utils.tensorboard", "tensorboardX"]:
                try:
                    self.writer = importlib.import_module(module).SummaryWriter(log_dir)
                    # Record which module was actually imported; in the
                    # original flow this was only set for failed imports.
                    self.selected_module = module
                    succeeded = True
                    break
                except ImportError:
                    succeeded = False

            if not succeeded:
                message = "Warning: visualization (Tensorboard) is configured to use, but currently not installed on " \
                    "this machine. Please install either TensorboardX with 'pip install tensorboardx', upgrade " \
                    "PyTorch to version >= 1.1 for using 'torch.utils.tensorboard' or turn off the option in " \
                    "the 'config.json' file."
                logger.warning(message)

        self.step = 0
        self.mode = ''

        self.tb_writer_ftns = {
            'add_scalar', 'add_scalars', 'add_image', 'add_images', 'add_audio',
            'add_text', 'add_histogram', 'add_pr_curve', 'add_embedding'
        }
        self.tag_mode_exceptions = {'add_histogram', 'add_embedding'}
            
        self.timer = Timer()

    def set_step(self, step, mode='train'):
        self.mode = mode
        self.step = step
        if step == 0:
            self.timer.reset()
        else:
            duration = self.timer.check()
            self.add_scalar('steps_per_sec', 1 / duration)

    def __getattr__(self, name):
        """
        If visualization is configured to use:
            return add_data() methods of tensorboard with additional information (step, tag) added.
        Otherwise:
            return a blank function handle that does nothing
        """
        if name in self.tb_writer_ftns:
            add_data = getattr(self.writer, name, None)

            def wrapper(tag, data, *args, **kwargs):
                if add_data is not None:
                    # add mode(train/valid) tag
                    if name not in self.tag_mode_exceptions:
                        tag = '{}/{}'.format(tag, self.mode)
                    add_data(tag, data, self.step, *args, **kwargs)
            return wrapper
        else:
            # Default action for methods defined in this class, set_step() for
            # instance. __getattr__ is only reached after normal lookup fails,
            # so this re-raises with a clearer message.
            try:
                attr = object.__getattribute__(self, name)
            except AttributeError:
                raise AttributeError("type object '{}' has no attribute '{}'".format(self.selected_module, name))
            return attr
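
A short usage sketch of the wrapper (the log directory and `compute_loss()` are illustrative):

writer = TensorboardWriter('saved/log', logger, enabled=True)
for step in range(100):
    writer.set_step(step, mode='train')
    # add_scalar is resolved via __getattr__: the current step is injected
    # and the tag becomes 'loss/train'.
    writer.add_scalar('loss', compute_loss())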
Example #19
def eval_once(saver, ckpt_path, summary_writer, imdb, model):

  with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:

    # Restores from checkpoint
    saver.restore(sess, ckpt_path)
    # Assuming model_checkpoint_path looks something like:
    #   /ckpt_dir/model.ckpt-0,
    # extract global_step from it.
    global_step = ckpt_path.split('/')[-1].split('-')[-1]

    num_images = len(imdb.image_idx)

    all_boxes = [[[] for _ in xrange(num_images)]
                 for _ in xrange(imdb.num_classes)]

    _t = {'im_detect': Timer(), 'im_read': Timer(), 'misc': Timer()}

    num_detection = 0.0
    for i in xrange(num_images):
      _t['im_read'].tic()
      images, scales = imdb.read_image_batch(shuffle=False)
      _t['im_read'].toc()

      _t['im_detect'].tic()
      det_boxes, det_probs, det_class = sess.run(
          [model.det_boxes, model.det_probs, model.det_class],
          feed_dict={model.image_input:images, \
                     model.is_training: False, model.keep_prob: 1.0})
      _t['im_detect'].toc()

      _t['misc'].tic()
      for j in range(len(det_boxes)): # batch
        # rescale
        det_boxes[j, :, 0::2] /= scales[j][0]
        det_boxes[j, :, 1::2] /= scales[j][1]

        det_bbox, score, det_class = model.filter_prediction(
            det_boxes[j], det_probs[j], det_class[j])

        num_detection += len(det_bbox)
        for c, b, s in zip(det_class, det_bbox, score):
          all_boxes[c][i].append(bbox_transform(b) + [s])
      _t['misc'].toc()

      print('im_detect: {:d}/{:d} im_read: {:.3f}s '
            'detect: {:.3f}s misc: {:.3f}s'.format(
                i+1, num_images, _t['im_read'].average_time,
                _t['im_detect'].average_time, _t['misc'].average_time))

    print('Evaluating detections...')
    aps, ap_names = imdb.evaluate_detections(
        FLAGS.eval_dir, global_step, all_boxes)

    print('Evaluation summary:')
    print('  Average number of detections per image: {}'.format(
      num_detection/num_images))
    print('  Timing:')
    print('    im_read: {:.3f}s detect: {:.3f}s misc: {:.3f}s'.format(
      _t['im_read'].average_time, _t['im_detect'].average_time,
      _t['misc'].average_time))
    print('  Average precisions:')

    # tf.scalar_summary is the pre-1.0 TensorFlow API (tf.summary.scalar today).
    eval_summary_ops = []
    for cls, ap in zip(ap_names, aps):
      eval_summary_ops.append(
          tf.scalar_summary('APs/'+cls, ap)
      )
      print('    {}: {:.3f}'.format(cls, ap))
    print('    Mean average precision: {:.3f}'.format(np.mean(aps)))
    eval_summary_ops.append(
        tf.scalar_summary('APs/mAP', np.mean(aps))
    )
    eval_summary_ops.append(
        tf.scalar_summary('timing/image_detect', _t['im_detect'].average_time)
    )
    eval_summary_ops.append(
        tf.scalar_summary('timing/image_read', _t['im_read'].average_time)
    )
    eval_summary_ops.append(
        tf.scalar_summary('timing/post_process', _t['misc'].average_time)
    )
    eval_summary_ops.append(
        tf.scalar_summary('num_detections_per_image', num_detection/num_images)
    )

    print('Analyzing detections...')
    stats, ims = imdb.do_detection_analysis_in_eval(
        FLAGS.eval_dir, global_step)
    for k, v in stats.iteritems():
      eval_summary_ops.append(
          tf.scalar_summary(
            'Detection Analysis/'+k, v)
      )

    eval_summary_str = sess.run(eval_summary_ops)
    for sum_str in eval_summary_str:
      summary_writer.add_summary(sum_str, global_step)
Example #20
def evaluate():
    """Evaluate."""
    assert FLAGS.dataset == 'ILSVRC2013', \
        'Only the ILSVRC2013 dataset is supported'

    with tf.Graph().as_default() as g:

        mc = imagenet_config()
        mc.PRETRAINED_MODEL_PATH = FLAGS.pkl_path
        imdb = imagenet(FLAGS.image_set, FLAGS.data_path, mc)

        assert FLAGS.net in ['darknet19', 'vgg16'], \
            'Selected neural net architecture not supported: {}'.format(FLAGS.net)
        if FLAGS.net == 'darknet19':
            model = DARKNET19(mc, FLAGS.gpu)
        elif FLAGS.net == 'vgg16':
            model = VGG16(mc, FLAGS.gpu)

        # save model size, flops, activations by layers
        with open(os.path.join(FLAGS.eval_dir, 'model_metrics.txt'), 'w') as f:
            f.write('Number of parameter by layer:\n')
            count = 0
            for c in model.model_size_counter:
                f.write('\t{}: {}\n'.format(c[0], c[1]))
                count += c[1]
            f.write('\ttotal: {}\n'.format(count))

            count = 0
            f.write('\nActivation size by layer:\n')
            for c in model.activation_counter:
                f.write('\t{}: {}\n'.format(c[0], c[1]))
                count += c[1]
            f.write('\ttotal: {}\n'.format(count))

            count = 0
            f.write('\nNumber of flops by layer:\n')
            for c in model.flop_counter:
                f.write('\t{}: {}\n'.format(c[0], c[1]))
                count += c[1]
            f.write('\ttotal: {}\n'.format(count))
        print('Model statistics saved to {}.'.format(
            os.path.join(FLAGS.eval_dir, 'model_metrics.txt')))

        init = tf.global_variables_initializer()

        with tf.Session(config=tf.ConfigProto(
                allow_soft_placement=True)) as sess:
            # run init
            sess.run(init)

            # testing
            num_images = len(imdb.image_idx)
            num_batches = np.ceil(float(num_images) / mc.BATCH_SIZE).astype(
                np.int64)

            _t = {'im_cls': Timer(), 'im_read': Timer()}

            all_labels, all_preds = [], []
            for i in xrange(num_batches):
                if i == 10: break  # debug: evaluate only the first 10 batches
                print('{} / {}'.format(i + 1, num_batches))
                _t['im_read'].tic()
                images, labels, scales = imdb.read_cls_batch(shuffle=False)
                _t['im_read'].toc()

                _t['im_cls'].tic()
                cls_idx = sess.run(
                    [model.pred_class],
                    feed_dict={model.image_input:images, \
                               model.is_training: False})
                _t['im_cls'].toc()
                all_labels.extend(labels)
                all_preds.extend(cls_idx[0].tolist())

            # evaluate
            acc = 0.
            for i in xrange(num_images):
                if i == 320: break  # debug: score only the first 320 images
                print('label: {}, pred: {}'.format(all_labels[i],
                                                   all_preds[i] + 1))
                if all_labels[i] == all_preds[i] + 1:
                    acc += 1.
            acc = acc * 100. / num_images
            print('Evaluation:')
            print('  Timing:')
            print('    im_read: {:.3f}s im_cls: {:.3f}s'.format(
                _t['im_read'].average_time / mc.BATCH_SIZE,
                _t['im_cls'].average_time / mc.BATCH_SIZE))
            print('  Accuracy: {:.2f}%'.format(acc))
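
The per-image scoring loop above can also be vectorized; a sketch with numpy, assuming the same 1-indexed ILSVRC labels and matching list lengths:

import numpy as np

labels = np.asarray(all_labels)
preds = np.asarray(all_preds) + 1  # model classes are 0-indexed, labels 1-indexed
accuracy = 100.0 * (labels == preds).mean()
print('Accuracy: {:.2f}%'.format(accuracy))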