def initialize_tensorboard(self):
     outputdir = get_output_dir(self.root_dir)
     loggin_dir = os.path.join(outputdir, 'runs', 'clustering')
     if not os.path.exists(loggin_dir):
         os.makedirs(loggin_dir)
     self.logger_tensorboard = tensorboard_logger.Logger(
         os.path.join(loggin_dir, '{}'.format(self.id)))
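A minimal usage sketch for the logger created above, assuming the tensorboard_logger package's Logger.log_value API; the run directory and metric name are illustrative:

from tensorboard_logger import Logger

logger = Logger('runs/clustering/0')      # illustrative run directory
for step in range(100):
    loss = 1.0 / (step + 1)               # dummy value standing in for a real metric
    logger.log_value('loss', loss, step)  # shows up as a scalar in TensorBoard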
Example #2
def main(args, net=None):
    datadir = get_data_dir(args.db)
    outputdir = get_output_dir(args.db)

    use_cuda = torch.cuda.is_available()

    # Set the seed for reproducing the results
    random.seed(args.manualSeed)
    np.random.seed(args.manualSeed)
    torch.manual_seed(args.manualSeed)
    if use_cuda:
        torch.cuda.manual_seed_all(args.manualSeed)
        torch.backends.cudnn.enabled = True
        cudnn.benchmark = True

    kwargs = {'num_workers': 0, 'pin_memory': True} if use_cuda else {}
    trainset = DCCPT_data(root=datadir, train=True, h5=args.h5)
    testset = DCCPT_data(root=datadir, train=False, h5=args.h5)

    # load from checkpoint if we're not given an external net
    load_checkpoint = net is None
    if net is None:
        net = dp.load_predefined_extract_net(args)

    totalset = torch.utils.data.ConcatDataset([trainset, testset])
    dataloader = torch.utils.data.DataLoader(totalset, batch_size=100, shuffle=False, **kwargs)

    # copying model params from checkpoint
    if load_checkpoint:
        filename = os.path.join(outputdir, args.torchmodel)
        if os.path.isfile(filename):
            print("==> loading params from checkpoint '{}'".format(filename))
            checkpoint = torch.load(filename)
            net.load_state_dict(checkpoint['state_dict'])
        else:
            print("==> no checkpoint found at '{}'".format(filename))
            raise ValueError(filename)

    if use_cuda:
        net.cuda()

    print('Extracting features ...')
    features, features_dr, labels = extract(dataloader, net, use_cuda)
    print('Done.\n')

    feat_path = os.path.join(datadir, args.feat)
    if args.h5:
        import h5py
        fo = h5py.File(feat_path + '.h5', 'w')
        fo.create_dataset('labels', data=labels)
        fo.create_dataset('Z', data=np.squeeze(features_dr))
        fo.create_dataset('data', data=np.squeeze(features))
        fo.close()
    else:
        fo = open(feat_path + '.pkl', 'wb')
        pickle.dump({'labels': labels, 'Z': np.squeeze(features_dr), 'data': np.squeeze(features)}, fo, protocol=2)
        fo.close()
    return features, features_dr, labels
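A small companion sketch (not part of the original repo) for reading the features this function writes; it assumes only the key names used above:

import pickle
import h5py

def load_features(feat_path, h5=True):
    # mirrors the writer above: keys are 'labels', 'Z' (reduced), 'data' (full)
    if h5:
        with h5py.File(feat_path + '.h5', 'r') as f:
            return f['data'][()], f['Z'][()], f['labels'][()]
    with open(feat_path + '.pkl', 'rb') as f:
        d = pickle.load(f)
    return d['data'], d['Z'], d['labels']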
Example #3
def main(args):
    datadir = get_data_dir(args.db)
    outputdir = get_output_dir(args.db)

    logger = None
    if args.tensorboard:
        # Create the folder for storing logs
        loggin_dir = os.path.join(outputdir, 'runs', 'pretraining')
        if not os.path.exists(loggin_dir):
            os.makedirs(loggin_dir)
        loggin_dir = os.path.join(loggin_dir, '%s' % (args.id))
        if args.clean_log:
            remove_files_in_dir(loggin_dir)
        logger = Logger(loggin_dir)

    use_cuda = torch.cuda.is_available()

    # Set the seed for reproducing the results
    random.seed(args.manualSeed)
    np.random.seed(args.manualSeed)
    torch.manual_seed(args.manualSeed)
    if use_cuda:
        torch.cuda.manual_seed_all(args.manualSeed)
        torch.backends.cudnn.enabled = True
        cudnn.benchmark = True

    kwargs = {'num_workers': 0, 'pin_memory': True} if use_cuda else {}
    trainset = DCCPT_data(root=datadir, train=True, h5=args.h5)
    testset = DCCPT_data(root=datadir, train=False, h5=args.h5)

    nepoch = int(
        np.ceil(
            np.array(args.niter * args.batchsize, dtype=float) /
            len(trainset)))
    step = int(
        np.ceil(
            np.array(args.step * args.batchsize, dtype=float) / len(trainset)))

    trainloader = torch.utils.data.DataLoader(trainset,
                                              batch_size=args.batchsize,
                                              shuffle=True,
                                              **kwargs)
    testloader = torch.utils.data.DataLoader(testset,
                                             batch_size=100,
                                             shuffle=True,
                                             **kwargs)

    return pretrain(
        args, outputdir, {
            'nlayers': 4,
            'dropout': 0.2,
            'reluslope': 0.0,
            'nepoch': nepoch,
            'lrate': [args.lr],
            'wdecay': [0.0],
            'step': step
        }, use_cuda, trainloader, testloader, logger)
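The nepoch/step arithmetic above converts an iteration budget into epoch counts; a quick worked example with made-up numbers:

import numpy as np

niter, batchsize, ntrain = 50000, 256, 60000              # illustrative values only
nepoch = int(np.ceil(niter * batchsize / float(ntrain)))  # 50000*256/60000 -> ceil(213.3) = 214
step = int(np.ceil(20000 * batchsize / float(ntrain)))    # lr-decay step, same conversion -> 86
print(nepoch, step)  # 214 86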
 def load_pretrained_fd_autoencoder(self):
     """
     load pretrained stack denoise autoencoder
     """
     outputdir = get_output_dir(self.root_dir)
     # NOTE: the computed output dir above is deliberately overridden here
     outputdir = self.root_dir
     net_filename = os.path.join(outputdir, cfg.PRETRAINED_FAE_FILENAME)
     checkpoint = torch.load(net_filename)
     # loading a checkpoint saved from a CUDA model can fail on a CPU-only
     # machine (see the map_location sketch after this method)
     self.fd_ae.load_state_dict(checkpoint['state_dict'])
     if self.use_cuda:
         self.fd_ae.cuda()
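On the CUDA caveat above: a common remedy is torch.load's map_location argument, which remaps storages at load time; the filename below is illustrative:

import torch

# Load a checkpoint saved on GPU onto whatever device is available.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
checkpoint = torch.load('pretrained_fae.pth', map_location=device)
# net.load_state_dict(checkpoint['state_dict'])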
Example #5
def main():
    global args
    use_cuda = torch.cuda.is_available()
    initialize_environment(random_seed=cfg.RNG_SEED, use_cuda=use_cuda)

    args = parser.parse_args()
    datadir = args.db_dir
    outputdir = get_output_dir(args.db_dir)
    nepoch = args.nepoch
    step = args.step_epoch
    dropout = args.dropout
    n_layers = cfg.N_LAYERS
    input_dim = cfg.INPUT_DIM
    hidden_dims = cfg.HIDDEN_DIMS

    # logging information
    loggin_dir = os.path.join(outputdir, 'runs', 'pretraining')
    if not os.path.exists(loggin_dir):
        os.makedirs(loggin_dir)
    tensorboard_logger.configure(os.path.join(loggin_dir, '%s' % (args.id)))

    trainset = EncodedTextDataset(root=datadir, train=True)
    testset = EncodedTextDataset(root=datadir, train=False)
    kwargs = {'num_workers': 0, 'pin_memory': True} if use_cuda else {}
    trainloader = torch.utils.data.DataLoader(trainset,
                                              batch_size=args.batchsize,
                                              shuffle=True,
                                              **kwargs)
    testloader = torch.utils.data.DataLoader(testset,
                                             batch_size=100,
                                             shuffle=True,
                                             **kwargs)

    pretrain(
        outputdir, {
            'nlayers': n_layers,
            'dropout': dropout,
            'reluslope': 0.0,
            'nepoch': nepoch,
            'lrate': [args.lr],
            'wdecay': [0.0],
            'step': step,
            'input_dim': input_dim,
            'hidden_dims': hidden_dims
        }, use_cuda, trainloader, testloader)
Example #6
def read_datasets(args):
    imdb, roidb = read_db(args.imdb_name)
    print('{:d} roidb entries'.format(len(roidb)))

    # output directory where the models are saved
    output_dir = get_output_dir(imdb, args.tag)
    print('Output will be saved to `{:s}`'.format(output_dir))

    # tensorboard directory where the summaries are saved during training
    #tb_dir = get_output_tb_dir(imdb, args.tag)
    #print('TensorFlow summaries will be saved to `{:s}`'.format(tb_dir))

    # also load the validation set, but without image flipping
    orgflip = cfg.TRAIN.USE_FLIPPED
    cfg.TRAIN.USE_FLIPPED = False
    _, valroidb = read_db(args.imdbval_name)
    print('{:d} validation roidb entries'.format(len(valroidb)))
    cfg.TRAIN.USE_FLIPPED = orgflip

    return imdb, roidb, valroidb, output_dir
Example #7
                    type=str)
parser.add_argument('--out',
                    dest='out',
                    help='path to the output file',
                    default=None,
                    type=str)
parser.add_argument('--h5',
                    dest='h5',
                    action='store_true',
                    help='to store as h5py file')

if __name__ == '__main__':

    args = parser.parse_args()
    datadir = get_data_dir(args.db)
    outputdir = get_output_dir(args.db)

    featurefile = os.path.join(datadir, args.feat)
    graphfile = os.path.join(datadir, args.g)
    outputfile = os.path.join(datadir, args.out)
    if os.path.isfile(featurefile) and os.path.isfile(graphfile):

        if args.h5:
            data0 = h5py.File(featurefile, 'r')
            data1 = h5py.File(graphfile, 'r')
            data2 = h5py.File(outputfile + '.h5', 'w')
        else:
            fo = open(featurefile, 'rb')
            data0 = cPickle.load(fo)
            data1 = sio.loadmat(graphfile)
            fo.close()
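Several snippets on this page branch on args.h5 like this; a small hedged helper that loads the feature/graph pair in either format, assuming the key layout used by the writers above:

import h5py
import scipy.io as sio
try:
    import cPickle  # Python 2
except ImportError:
    import pickle as cPickle

def load_pair(featurefile, graphfile, h5=False):
    # returns (features, graph) handles/dicts for both storage formats
    if h5:
        return h5py.File(featurefile, 'r'), h5py.File(graphfile, 'r')
    with open(featurefile, 'rb') as fo:
        feats = cPickle.load(fo)
    return feats, sio.loadmat(graphfile)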
Example #8
def test_net(net, imdb, max_per_image=100, thresh=0.05, vis=False):
    """Test a Fast R-CNN network on an image database."""
    num_images = len(imdb.image_index)
    # all detections are collected into:
    #    all_boxes[cls][image] = N x 5 array of detections in
    #    (x1, y1, x2, y2, score)
    all_boxes = [[[] for _ in xrange(num_images)]
                 for _ in xrange(imdb.num_classes)]

    output_dir = get_output_dir(imdb, net)

    # timers
    _t = {'im_detect': Timer(), 'misc': Timer()}

    roidb = imdb.roidb

    for i in xrange(num_images):
        # filter out any ground truth boxes

        # The roidb may contain ground-truth rois (for example, if the roidb
        # comes from the training or val split). We only want to evaluate
        # detection on the *non*-ground-truth rois. We select the rois that
        # have the gt_classes field set to 0, which means there's no ground
        # truth.

        # I had to create this hard limit because ~2k proposals just won't fit into 4 GB of my VRAM
        box_proposals = roidb[i]['boxes'][roidb[i]['gt_classes'] == 0][:1792]

        im = cv2.imread(imdb.image_path_at(i))
        _t['im_detect'].tic()
        scores, boxes = im_detect(net, im, box_proposals)
        _t['im_detect'].toc()

        _t['misc'].tic()
        # skip j = 0, because it's the background class
        for j in xrange(1, imdb.num_classes):
            inds = np.where(scores[:, j] > thresh)[0]
            cls_scores = scores[inds, j]
            cls_boxes = boxes[inds, j * 4:(j + 1) * 4]
            cls_dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])) \
                .astype(np.float32, copy=False)
            keep = nms(cls_dets, cfg.TEST.NMS)
            cls_dets = cls_dets[keep, :]
            if vis:
                vis_detections(im, imdb.classes[j], cls_dets)
            all_boxes[j][i] = cls_dets

        # Limit to max_per_image detections *over all classes*
        if max_per_image > 0:
            image_scores = np.hstack(
                [all_boxes[j][i][:, -1] for j in xrange(1, imdb.num_classes)])
            if len(image_scores) > max_per_image:
                image_thresh = np.sort(image_scores)[-max_per_image]
                for j in xrange(1, imdb.num_classes):
                    keep = np.where(all_boxes[j][i][:, -1] >= image_thresh)[0]
                    all_boxes[j][i] = all_boxes[j][i][keep, :]
        _t['misc'].toc()

        print('im_detect: {:d}/{:d} {:.3f}s {:.3f}s'
              .format(i + 1, num_images, _t['im_detect'].average_time,
                      _t['misc'].average_time))

    det_file = os.path.join(output_dir, 'detections.pkl')
    with open(det_file, 'wb') as f:
        cPickle.dump(all_boxes, f, cPickle.HIGHEST_PROTOCOL)

    print('Evaluating detections')
    imdb.evaluate_detections(all_boxes, output_dir)
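The detections.pkl written above can be inspected offline; a minimal sketch using the all_boxes layout documented in the snippet:

import pickle

with open('detections.pkl', 'rb') as f:   # lives under output_dir in practice
    all_boxes = pickle.load(f)            # all_boxes[cls][image] = N x 5 array
for j, per_image in enumerate(all_boxes):
    n = sum(len(dets) for dets in per_image)
    print('class {}: {} detections'.format(j, n))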
Example #9
    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)

    train_cfg.GPU_ID = args.gpu_id

    print('Using config:')
    pprint.pprint(train_cfg)

    if not args.randomize:
        # fix the random seeds (numpy and caffe) for reproducibility
        np.random.seed(train_cfg.RNG_SEED)
        caffe.set_random_seed(train_cfg.RNG_SEED)

    # set up caffe
    caffe.set_mode_gpu()
    caffe.set_device(args.gpu_id)

    print('imdb name `{:s}`'.format(args.imdb_name))
    imdb, roidb = combined_roidb(args.imdb_name)
    print('{:d} roidb entries'.format(len(roidb)))

    output_dir = get_output_dir(imdb)
    print('Output will be saved to `{:s}`'.format(output_dir))

    train_net(args.solver, roidb, output_dir,
              pretrained_model=args.pretrained_model,
              max_iters=args.max_iters)
Example #10
        cfg_from_list(['USE_PREFETCH', False])

    print('Using config:')
    pprint.pprint(cfg)

    if not args.randomize:
        # fix the random seeds (numpy and caffe) for reproducibility
        np.random.seed(cfg.RNG_SEED)
        caffe.set_random_seed(cfg.RNG_SEED)

    # set up caffe
    caffe.set_mode_gpu()
    if args.gpu_id is not None:
        caffe.set_device(args.gpu_id)

    output_dir = get_output_dir(None, args.exp_dir)
    print('Output will be saved to `{:s}`'.format(output_dir))

    if args.solver:
        # If solver is set, train the network using the solver prototxt.
        sw = Solver(args.solver, output_dir, args.weights)
        print('Training...')
        sw.train(args.max_iters)
        print('Finished Training')
    elif args.weights is not None and args.net is not None:
        # Run test
        net = caffe.Net(args.net, args.weights, caffe.TEST)
        raise NotImplementedError()
    else:
        print('Must provide either a caffemodel with a net definition or a solver prototxt')
Example #11
    print('Using config:')
    pprint.pprint(cfg)

    np.random.seed(cfg.RNG_SEED)

    img_size = resnet_v1.resnet_v1.default_image_size

    # train set
    print("Setting up image reader...")
    data_reader = deepscores_classification_datareader.class_dataset_reader(
        cfg.DATA_DIR + "/DeepScores_2017/DeepScores_classification",
        pad_to=[img_size, img_size])

    imdb = imdb("DeepScores_2017")
    # output directory where the models are saved
    output_dir = get_output_dir(imdb, args.tag)
    print('Output will be saved to `{:s}`'.format(output_dir))

    # tensorboard directory where the summaries are saved during training
    tb_dir = get_output_tb_dir(imdb, args.tag)
    print('TensorFlow summaries will be saved to `{:s}`'.format(tb_dir))

    num_classes = 124

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)

    # Get the selected model.
    # Some of them require a pre-trained ResNet
    print("Preparing the model ...")
def main():
  args = parse_args()
  # merge args.cfg_file into cfg
  from config import cfg, cfg_from_file
  if args.cfg_file is not None:
    cfg_from_file(args.cfg_file)

  tf.logging.info('Using Config:')
  pprint.pprint(cfg)

  # pick (or create) the output folder
  from config import get_output_dir
  train_dir = get_output_dir('default' if args.cfg_file is None else args.cfg_file)

  # select which GPUs to use
  os.environ['CUDA_VISIBLE_DEVICES'] = cfg.GPUS
  num_clones = len(cfg.GPUS.split(','))

  # set the logging verbosity
  tf.logging.set_verbosity(tf.logging.INFO)

  # create the computation graph and make it the default graph
  with tf.Graph().as_default():
    tf.set_random_seed(cfg.RNG_SEED)
    # deployment settings for the model built below
    deploy_config = model_deploy.DeploymentConfig(num_clones=num_clones,
                                                  clone_on_cpu=False,
                                                  replica_id=0,
                                                  num_replicas=1,
                                                  num_ps_tasks=0)

    # create the global_step
    with tf.device(deploy_config.variables_device()):
      global_step = slim.create_global_step()

    # ------------------------------ dataset ------------------------------ #
    kwargs = {}  # hyper-parameters controlling how the video frames are consumed
    if cfg.TRAIN.VIDEO_FRAMES_PER_VIDEO > 1:
      kwargs['num_samples'] = cfg.TRAIN.VIDEO_FRAMES_PER_VIDEO
      kwargs['randomFromSegmentStyle'] = cfg.TRAIN.READ_SEGMENT_STYLE
      kwargs['modality'] = cfg.INPUT.VIDEO.MODALITY  # input modality, rgb by default
      kwargs['split_id'] = cfg.INPUT.SPLIT_ID
      # (two more options, dataset_list_dir and input_file_style_label, are
      # handled by the full version of this script further below)

    # read the data -- obtain the Dataset object
    # note: this `datasets` package lives in the repo and was written by the
    # author; get_dataset itself is unchanged, only the dataset options it
    # dispatches to are custom; it returns a Dataset object and an integer
    from datasets import dataset_factory
    dataset, num_pose_keypoints = dataset_factory.get_dataset(
        cfg.DATASET_NAME, cfg.DATASET_SPLIT_NAME, cfg.DATASET_DIR, **kwargs)

    # ------------------------------ network ------------------------------ #
    # the factory was rewritten by the author; network_fn has to exist before
    # the input pipeline below, which hands it to the preprocessor
    from nets import nets_factory
    network_fn = nets_factory.get_network_fn(
        cfg.MODEL_NAME,
        num_classes=dataset.num_classes,
        num_pose_keypoints=num_pose_keypoints,
        weight_decay=cfg.TRAIN.WEIGHT_DECAY,
        is_training=True,
        cfg=cfg)

    # select the preprocessing function (also modified by the author)
    from preprocessing import preprocessing_factory
    preprocessing_name = cfg.MODEL_NAME
    image_preprocessing_fn = preprocessing_factory.get_preprocessing(
        preprocessing_name, is_training=True)

    # read the data -- create the provider, read + preprocess, pack into
    # batches, and build the prefetch queue
    with tf.device(deploy_config.inputs_device()):
      provider = slim.dataset_data_provider.DatasetDataProvider(
          dataset,
          num_readers=cfg.NUM_READERS,
          common_queue_capacity=20 * cfg.TRAIN.BATCH_SIZE,
          common_queue_min=10 * cfg.TRAIN.BATCH_SIZE)
      # reads samples from the provider and preprocesses them with
      # image_preprocessing_fn; this is where data is actually read
      from preprocess_pipeline import train_preprocess_pipeline
      [image, pose_label_hmap, pose_label_valid, action_label] = \
          train_preprocess_pipeline(provider, cfg, network_fn,
                                    num_pose_keypoints, image_preprocessing_fn)
      # pack into batches
      images, pose_labels_hmap, pose_labels_valid, action_labels = tf.train.batch(
          [image, pose_label_hmap, pose_label_valid, action_label],
          batch_size=cfg.TRAIN.BATCH_SIZE,
          num_threads=cfg.NUM_PREPROCESSING_THREADS,
          capacity=5 * cfg.TRAIN.BATCH_SIZE)
      # build the prefetch queue
      batch_queue = slim.prefetch_queue.prefetch_queue(
          [images, pose_labels_hmap, pose_labels_valid, action_labels],
          capacity=5 * deploy_config.num_clones * cfg.TRAIN.ITER_SIZE)

    # ------------------------------ model ------------------------------ #
    def clone_fn(batch_queue):
      # dequeue one batch
      images, labels_pose, labels_pose_valid, labels_action = batch_queue.dequeue()
      labels_pose = tf.concat(tf.unstack(labels_pose), axis=0)
      labels_pose_valid = tf.concat(tf.unstack(labels_pose_valid), axis=0)

      # forward pass on images; note the network outputs not only the
      # classification logits but also the pose, recorded in end_points
      logits, end_points = network_fn(images)
      pose_logits = end_points['PoseLogits']

      # specify the loss function and compute the loss;
      # the author stashes everything in end_points
      end_points['Images'] = images
      end_points['PoseLabels'] = labels_pose
      end_points['ActionLabels'] = labels_action
      end_points['ActionLogits'] = logits
      # gen_losses is defined in loss.py in this directory
      gen_losses(labels_action, logits, cfg.TRAIN.LOSS_FN_ACTION,
                 dataset.num_classes, cfg.TRAIN.LOSS_FN_ACTION_WT,
                 labels_pose, pose_logits, cfg.TRAIN.LOSS_FN_POSE,
                 labels_pose_valid, cfg.TRAIN.LOSS_FN_POSE_WT,
                 end_points, cfg)

      return end_points

    # gather summaries
    summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES))

    # a clone wraps the model outputs and their name scope
    clones = model_deploy.create_clones(deploy_config, clone_fn, [batch_queue])
    first_clone_scope = deploy_config.clone_scope(0)
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, first_clone_scope)
    end_points = clones[0].outputs

    # add monitoring for every end_point
    for end_point in end_points:
      x = end_points[end_point]
      summaries.add(tf.summary.histogram('activations/' + end_point, x))

    # add an image summary; unstack splits the batch into individual frames
    # and concat tiles them spatially, so every frame can be inspected
    sum_img = tf.concat(tf.unstack(end_points['Images']), axis=0)
    if sum_img.get_shape().as_list()[-1] not in [1, 3, 4]:
      # collapse non-displayable channel counts and normalize to [0, 1]
      sum_img = tf.reduce_sum(sum_img, axis=-1, keep_dims=True)
      sum_img = sum_img - tf.reduce_min(sum_img)
      sum_img = sum_img / (tf.reduce_max(sum_img) + cfg.EPS)
    summaries.add(tf.summary.image('images', sum_img))

    # add the end points that only exist because pose was added to the model
    for epname in cfg.TRAIN.OTHER_IMG_SUMMARIES_TO_ADD:  # e.g. ['PosePrelogitsBasedAttention']
      if epname in end_points:
        summaries.add(tf.summary.image('image_vis/' + epname, end_points[epname]))

    # union the heatmap summaries into the running summary set
    summaries = summaries.union(
        _summarize_heatmaps('labels', end_points['PoseLabels'], sum_img))
    summaries = summaries.union(
        _summarize_heatmaps('pose', end_points['PoseLogits'], sum_img))

    # add summaries for the losses
    for loss in tf.get_collection(tf.GraphKeys.LOSSES, first_clone_scope):
      summaries.add(tf.summary.scalar(tensor=loss, name='losses/%s' % loss.op.name))

    # add summaries for the variables
    for variable in slim.get_model_variables():
      summaries.add(tf.summary.histogram(variable.op.name, variable))

    # configure the moving averages (not implemented in this excerpt)

    # configure the optimization procedure
    with tf.device(deploy_config.optimizer_device()):
      # set the learning rate
      learning_rate = _configure_learning_rate(dataset.num_samples, num_clones, global_step)
      optimizer = _configure_optimizer(learning_rate)
      summaries.add(tf.summary.scalar(tensor=learning_rate, name='learning_rate'))

    # select which variables take part in training
    variables_to_train = _get_variables_to_train()
Example #13
def test_net(sess, net, imdb, weights_filename, max_per_image=100, thresh=0.):
    """Test a Fast R-CNN network on an image database."""
    np.random.seed(cfg.RNG_SEED)
    num_images = len(imdb.image_index)
    # all detections are collected into:
    #  all_boxes[cls][image] = N x 5 array of detections in
    #  (x1, y1, x2, y2, score)
    all_boxes = [[[] for _ in range(num_images)]
                 for _ in range(imdb.num_classes)]

    output_dir = get_output_dir(imdb, weights_filename)
    # timers
    _t = {'im_detect': Timer(), 'misc': Timer()}

    for i in range(num_images):
        im = cv2.imread(imdb.image_path_at(i))

        _t['im_detect'].tic()
        # im_detect returns the per-class scores and the predicted boxes
        scores, boxes = im_detect(sess, net, im)
        # scores come from the RPN head: scores = self._predictions['cls_prob'],
        # i.e. the per-class cls_score passed through a softmax
        # pred_boxes = bbox_transform_inv(boxes, box_deltas)
        # regression step: the box_deltas (dx, dy, dw, dh) from the RPN branch
        # are applied to the filtered proposals; pred_boxes are the regressed
        # anchor boxes, and im_detect returns (scores, pred_boxes)

        _t['im_detect'].toc()

        _t['misc'].tic()

        # skip j = 0, because it's the background class
        for j in range(1, imdb.num_classes):
            inds = np.where(scores[:, j] > thresh)[0]
            cls_scores = scores[inds, j]
            cls_boxes = boxes[inds, j * 4:(j + 1) * 4]
            cls_dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])) \
              .astype(np.float32, copy=False)
            keep = nms(cls_dets, cfg.TEST.NMS)
            cls_dets = cls_dets[keep, :]
            all_boxes[j][i] = cls_dets

        # Limit to max_per_image detections *over all classes*
        if max_per_image > 0:
            image_scores = np.hstack(
                [all_boxes[j][i][:, -1] for j in range(1, imdb.num_classes)])
            if len(image_scores) > max_per_image:
                image_thresh = np.sort(image_scores)[-max_per_image]
                for j in range(1, imdb.num_classes):
                    keep = np.where(all_boxes[j][i][:, -1] >= image_thresh)[0]
                    all_boxes[j][i] = all_boxes[j][i][keep, :]
        _t['misc'].toc()

        print('im_detect: {:d}/{:d} {:.3f}s {:.3f}s' \
            .format(i + 1, num_images, _t['im_detect'].average_time,
                _t['misc'].average_time))

    det_file = os.path.join(output_dir, 'detections.pkl')
    with open(det_file, 'wb') as f:
        pickle.dump(all_boxes, f, pickle.HIGHEST_PROTOCOL)

    print('Evaluating detections')
    imdb.evaluate_detections(all_boxes, output_dir)
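The max_per_image limiting above picks the score threshold that keeps exactly the top-k detections across all classes; a small NumPy illustration of that step:

import numpy as np

scores_by_class = [np.array([0.9, 0.2]), np.array([0.8, 0.6, 0.1])]  # toy scores
max_per_image = 3
image_scores = np.hstack(scores_by_class)
if len(image_scores) > max_per_image:
    image_thresh = np.sort(image_scores)[-max_per_image]   # 3rd highest = 0.6
    kept = [s[s >= image_thresh] for s in scores_by_class]
    print(kept)  # [array([0.9]), array([0.8, 0.6])]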
Example #14
def main():
    args = parse_args()
    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)

    tf.logging.info('Using Config:')
    pprint.pprint(cfg)

    train_dir = get_output_dir(
        'default' if args.cfg_file is None else args.cfg_file)
    os.environ['CUDA_VISIBLE_DEVICES'] = cfg.GPUS
    num_clones = len(cfg.GPUS.split(','))

    tf.logging.set_verbosity(tf.logging.INFO)
    with tf.Graph().as_default():
        ######################
        # Config model_deploy#
        ######################
        tf.set_random_seed(cfg.RNG_SEED)
        deploy_config = model_deploy.DeploymentConfig(num_clones=num_clones,
                                                      clone_on_cpu=False,
                                                      replica_id=0,
                                                      num_replicas=1,
                                                      num_ps_tasks=0)

        # Create global_step
        with tf.device(deploy_config.variables_device()):
            global_step = slim.create_global_step()

        ######################
        # Select the dataset #
        ######################
        kwargs = {}
        if cfg.TRAIN.VIDEO_FRAMES_PER_VIDEO > 1:
            kwargs['num_samples'] = cfg.TRAIN.VIDEO_FRAMES_PER_VIDEO
            kwargs['randomFromSegmentStyle'] = cfg.TRAIN.READ_SEGMENT_STYLE
            kwargs['modality'] = cfg.INPUT.VIDEO.MODALITY
            kwargs['split_id'] = cfg.INPUT.SPLIT_ID
        if cfg.DATASET_LIST_DIR != '':
            kwargs['dataset_list_dir'] = cfg.DATASET_LIST_DIR
        if cfg.INPUT_FILE_STYLE_LABEL != '':
            kwargs['input_file_style_label'] = cfg.INPUT_FILE_STYLE_LABEL
        dataset, num_pose_keypoints = dataset_factory.get_dataset(
            cfg.DATASET_NAME, cfg.TRAIN.DATASET_SPLIT_NAME, cfg.DATASET_DIR,
            **kwargs)

        ####################
        # Select the network #
        ####################
        network_fn = nets_factory.get_network_fn(
            cfg.MODEL_NAME,
            num_classes=(dataset.num_classes),
            num_pose_keypoints=num_pose_keypoints,
            weight_decay=cfg.TRAIN.WEIGHT_DECAY,
            is_training=True,
            cfg=cfg)  # advanced network creation controlled with cfg.NET

        #####################################
        # Select the preprocessing function #
        #####################################
        preprocessing_name = cfg.MODEL_NAME
        image_preprocessing_fn = preprocessing_factory.get_preprocessing(
            preprocessing_name, is_training=True)

        ##############################################################
        # Create a dataset provider that loads data from the dataset #
        ##############################################################
        with tf.device(deploy_config.inputs_device()):
            provider = slim.dataset_data_provider.DatasetDataProvider(
                dataset,
                num_readers=cfg.NUM_READERS,
                common_queue_capacity=20 * cfg.TRAIN.BATCH_SIZE,
                common_queue_min=10 * cfg.TRAIN.BATCH_SIZE)

            [image, pose_label_hmap, pose_label_valid,
             action_label] = train_preprocess_pipeline(provider, cfg,
                                                       network_fn,
                                                       num_pose_keypoints,
                                                       image_preprocessing_fn)
            # input_data = [preprocess_pipeline(
            #   provider, cfg, network_fn, num_pose_keypoints, image_preprocessing_fn)
            #   for _ in range(cfg.NUM_PREPROCESSING_THREADS)]

            images, pose_labels_hmap, pose_labels_valid, action_labels = tf.train.batch(
                [image, pose_label_hmap, pose_label_valid, action_label],
                # input_data,
                batch_size=cfg.TRAIN.BATCH_SIZE,
                num_threads=cfg.NUM_PREPROCESSING_THREADS,
                capacity=5 * cfg.TRAIN.BATCH_SIZE)
            batch_queue = slim.prefetch_queue.prefetch_queue(
                [images, pose_labels_hmap, pose_labels_valid, action_labels],
                capacity=5 * deploy_config.num_clones * cfg.TRAIN.ITER_SIZE)

        ####################
        # Define the model #
        ####################
        def clone_fn(batch_queue):
            """Allows data parallelism by creating multiple clones of network_fn."""
            images, labels_pose, labels_pose_valid, labels_action = batch_queue.dequeue(
            )
            # due to the multi-frame/video thing, need to squeeze first 2 dimensions
            labels_pose = tf.concat(tf.unstack(labels_pose), axis=0)
            labels_pose_valid = tf.concat(tf.unstack(labels_pose_valid),
                                          axis=0)
            logits, end_points = network_fn(images)
            pose_logits = end_points['PoseLogits']

            #############################
            # Specify the loss function #
            #############################
            # if 'AuxLogits' in end_points:
            #   slim.losses.softmax_cross_entropy(
            #       end_points['AuxLogits'], labels,
            #       label_smoothing=cfg.TRAIN.LABEL_SMOOTHING, weight=0.4, scope='aux_loss')
            # slim.losses.softmax_cross_entropy(
            #     logits, labels, label_smoothing=cfg.TRAIN.LABEL_SMOOTHING, weight=1.0)
            end_points['Images'] = images
            end_points['PoseLabels'] = labels_pose
            end_points['ActionLabels'] = labels_action
            end_points['ActionLogits'] = logits
            tf.logging.info('PoseLogits shape is {}.'.format(
                pose_logits.get_shape().as_list()))

            gen_losses(labels_action, logits, cfg.TRAIN.LOSS_FN_ACTION,
                       dataset.num_classes, cfg.TRAIN.LOSS_FN_ACTION_WT,
                       labels_pose, pose_logits, cfg.TRAIN.LOSS_FN_POSE,
                       labels_pose_valid, cfg.TRAIN.LOSS_FN_POSE_WT,
                       end_points, cfg)

            return end_points

        # Gather initial summaries.
        summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES))

        clones = model_deploy.create_clones(deploy_config, clone_fn,
                                            [batch_queue])
        first_clone_scope = deploy_config.clone_scope(0)
        # Gather update_ops from the first clone. These contain, for example,
        # the updates for the batch_norm variables created by network_fn.
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS,
                                       first_clone_scope)

        # Add summaries for end_points.
        end_points = clones[0].outputs

        # store the end points in a global variable for debugging in train_step
        global end_points_debug
        end_points_debug = end_points

        for end_point in end_points:
            x = end_points[end_point]
            summaries.add(tf.summary.histogram('activations/' + end_point, x))
            # summaries.add(tf.summary.scalar(tf.nn.zero_fraction(x),
            #                                 name='sparsity/' + end_point))
        sum_img = tf.concat(tf.unstack(end_points['Images']), axis=0)
        if sum_img.get_shape().as_list()[-1] not in [1, 3, 4]:
            sum_img = tf.reduce_sum(sum_img, axis=-1, keep_dims=True)
            sum_img = sum_img - tf.reduce_min(sum_img)
            sum_img = sum_img / (tf.reduce_max(sum_img) + cfg.EPS)
        summaries.add(tf.summary.image('images', sum_img))
        for epname in cfg.TRAIN.OTHER_IMG_SUMMARIES_TO_ADD:
            if epname in end_points:
                summaries.add(
                    tf.summary.image('image_vis/' + epname,
                                     end_points[epname]))
        summaries = summaries.union(
            _summarize_heatmaps('labels', end_points['PoseLabels'], sum_img))
        summaries = summaries.union(
            _summarize_heatmaps('pose', end_points['PoseLogits'], sum_img))
        if 'PoseLossMask' in end_points:
            summaries = summaries.union(
                _summarize_heatmaps('loss_mask/pose',
                                    end_points['PoseLossMask'], sum_img))

        # Add summaries for losses.
        for loss in tf.get_collection(tf.GraphKeys.LOSSES, first_clone_scope):
            summaries.add(
                tf.summary.scalar(tensor=loss,
                                  name='losses/%s' % loss.op.name))

        # Add summaries for variables.
        for variable in slim.get_model_variables():
            summaries.add(tf.summary.histogram(variable.op.name, variable))

        #################################
        # Configure the moving averages #
        #################################
        if cfg.TRAIN.MOVING_AVERAGE_VARIABLES:
            moving_average_variables = slim.get_model_variables()
            variable_averages = tf.train.ExponentialMovingAverage(
                cfg.TRAIN.MOVING_AVERAGE_VARIABLES, global_step)
        else:
            moving_average_variables, variable_averages = None, None

        #########################################
        # Configure the optimization procedure. #
        #########################################
        with tf.device(deploy_config.optimizer_device()):
            learning_rate = _configure_learning_rate(dataset.num_samples,
                                                     num_clones, global_step)
            optimizer = _configure_optimizer(learning_rate)
            summaries.add(
                tf.summary.scalar(tensor=learning_rate, name='learning_rate'))

        # if cfg.sync_replicas:
        #   # If sync_replicas is enabled, the averaging will be done in the chief
        #   # queue runner.
        #   optimizer = tf.train.SyncReplicasOptimizer(
        #       opt=optimizer,
        #       replicas_to_aggregate=,
        #       variable_averages=variable_averages,
        #       variables_to_average=moving_average_variables,
        #       replica_id=tf.constant(cfg.task, tf.int32, shape=()),
        #       total_num_replicas=cfg.worker_replicas)
        # elif cfg.moving_average_decay:
        #   # Update ops executed locally by trainer.
        #   update_ops.append(variable_averages.apply(moving_average_variables))

        # Variables to train.
        variables_to_train = _get_variables_to_train()
        tf.logging.info('Training the following variables: {}'.format(
            ', '.join([var.op.name for var in variables_to_train])))

        #  and returns a train_tensor and summary_op
        total_loss, clones_gradients = model_deploy.optimize_clones(
            clones,
            optimizer,
            var_list=variables_to_train,
            clip_gradients=cfg.TRAIN.CLIP_GRADIENTS)
        # Add total_loss to summary.
        summaries.add(tf.summary.scalar(tensor=total_loss, name='total_loss'))

        # Create gradient updates.
        train_ops = {}
        if cfg.TRAIN.ITER_SIZE == 1:
            grad_updates = optimizer.apply_gradients(clones_gradients,
                                                     global_step=global_step)
            update_ops.append(grad_updates)

            update_op = tf.group(*update_ops)
            train_tensor = control_flow_ops.with_dependencies([update_op],
                                                              total_loss,
                                                              name='train_op')
            train_ops = train_tensor
        else:
            with tf.name_scope('AccumulateGradients'):
                # copied as is from my previous code
                gvs = [(grad, var) for grad, var in clones_gradients]
                varnames = [var.name for grad, var in gvs]
                varname_to_var = {var.name: var for grad, var in gvs}
                varname_to_grad = {var.name: grad for grad, var in gvs}
                varname_to_ref_grad = {}
                for vn in varnames:
                    grad = varname_to_grad[vn]
                    print("accumulating ... ", (vn, grad.get_shape()))
                    with tf.variable_scope("ref_grad"):
                        with tf.device(deploy_config.variables_device()):
                            ref_var = slim.local_variable(np.zeros(
                                grad.get_shape(), dtype=np.float32),
                                                          name=vn[:-2])
                            varname_to_ref_grad[vn] = ref_var

                all_assign_ref_op = [
                    ref.assign(varname_to_grad[vn])
                    for vn, ref in varname_to_ref_grad.items()
                ]
                all_assign_add_ref_op = [
                    ref.assign_add(varname_to_grad[vn])
                    for vn, ref in varname_to_ref_grad.items()
                ]
                assign_gradients_ref_op = tf.group(*all_assign_ref_op)
                accmulate_gradients_op = tf.group(*all_assign_add_ref_op)
                with tf.control_dependencies([accmulate_gradients_op]):
                    final_gvs = [(varname_to_ref_grad[var.name] /
                                  float(cfg.TRAIN.ITER_SIZE), var)
                                 for grad, var in gvs]
                    apply_gradients_op = optimizer.apply_gradients(
                        final_gvs, global_step=global_step)
                    update_ops.append(apply_gradients_op)
                    update_op = tf.group(*update_ops)
                    train_tensor = control_flow_ops.with_dependencies(
                        [update_op], total_loss, name='train_op')
                for i in range(cfg.TRAIN.ITER_SIZE):
                    if i == 0:
                        train_ops[i] = assign_gradients_ref_op
                    elif i < cfg.TRAIN.ITER_SIZE - 1:  # because apply_gradients also computes
                        # (see control_dependency), so
                        # no need of running an extra iteration
                        train_ops[i] = accmulate_gradients_op
                    else:
                        train_ops[i] = train_tensor

        # Add the summaries from the first clone. These contain the summaries
        # created by model_fn and either optimize_clones() or _gather_clone_loss().
        summaries |= set(
            tf.get_collection(tf.GraphKeys.SUMMARIES, first_clone_scope))

        # Merge all summaries together.
        summary_op = tf.summary.merge(list(summaries), name='summary_op')

        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        config.intra_op_parallelism_threads = 4  # to avoid too many threads
        # The following seems optimal... though not sure
        config.inter_op_parallelism_threads = max(
            cfg.NUM_PREPROCESSING_THREADS, 12)
        ###########################
        # Kicks off the training. #
        ###########################
        slim.learning.train(train_ops,
                            train_step_fn=_train_step,
                            logdir=train_dir,
                            master='',
                            is_chief=True,
                            init_fn=_get_init_fn(train_dir),
                            summary_op=summary_op,
                            number_of_steps=cfg.TRAIN.MAX_NUMBER_OF_STEPS,
                            log_every_n_steps=cfg.TRAIN.LOG_EVERY_N_STEPS,
                            save_summaries_secs=cfg.TRAIN.SAVE_SUMMARIES_SECS,
                            save_interval_secs=cfg.TRAIN.SAVE_INTERVAL_SECS,
                            sync_optimizer=None,
                            session_config=config)
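The ITER_SIZE > 1 branch above accumulates gradients over several mini-batches and divides by ITER_SIZE before applying them, approximating one step with a batch ITER_SIZE times larger; the arithmetic in a framework-free sketch:

import numpy as np

ITER_SIZE = 4
grads = [np.random.randn(3) for _ in range(ITER_SIZE)]  # per-mini-batch gradients

acc = grads[0].copy()            # assign_gradients_ref_op: overwrite on step 0
for g in grads[1:]:
    acc += g                     # accumulate op on the intermediate steps
update = acc / float(ITER_SIZE)  # averaged gradient applied on the last step
assert np.allclose(update, np.mean(grads, axis=0))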
Example #15
def main():
    global args

    args = parser.parse_args()
    datadir = get_data_dir(args.db)
    outputdir = get_output_dir(args.db)

    use_cuda = torch.cuda.is_available()

    # Set the seed for reproducing the results
    random.seed(args.manualSeed)
    np.random.seed(args.manualSeed)
    torch.manual_seed(args.manualSeed)
    if use_cuda:
        torch.cuda.manual_seed_all(args.manualSeed)
        torch.backends.cudnn.enabled = True
        cudnn.benchmark = True

    reluslope = 0.0
    kwargs = {'num_workers': 0, 'pin_memory': True} if use_cuda else {}
    trainset = DCCPT_data(root=datadir, train=True, h5=args.h5)
    testset = DCCPT_data(root=datadir, train=False, h5=args.h5)
    if args.db == 'mnist':
        net = extract_sdae_mnist(slope=reluslope, dim=args.dim)
    elif args.db == 'reuters' or args.db == 'reuters10k' or args.db == 'rcv1':
        net = extract_sdae_reuters(slope=reluslope, dim=args.dim)
    elif args.db == 'ytf':
        net = extract_sdae_ytf(slope=reluslope, dim=args.dim)
    elif args.db == 'coil100':
        net = extract_sdae_coil100(slope=reluslope, dim=args.dim)
    elif args.db == 'yale':
        net = extract_sdae_yale(slope=reluslope, dim=args.dim)
    elif args.db == 'cmnist':
        net = extract_convsdae_mnist(slope=reluslope)
    elif args.db == 'ccoil100':
        net = extract_convsdae_coil100(slope=reluslope)
    elif args.db == 'cytf':
        net = extract_convsdae_ytf(slope=reluslope)
    elif args.db == 'cyale':
        net = extract_convsdae_yale(slope=reluslope)
    else:
        raise ValueError("unknown db '{}'".format(args.db))

    totalset = torch.utils.data.ConcatDataset([trainset, testset])
    dataloader = torch.utils.data.DataLoader(totalset, batch_size=100, shuffle=False, **kwargs)

    # copying model params from checkpoint
    filename = os.path.join(outputdir, args.torchmodel)
    if os.path.isfile(filename):
        print("==> loading params from checkpoint '{}'".format(filename))
        checkpoint = torch.load(filename)
        net.load_state_dict(checkpoint['state_dict'])
    else:
        print("==> no checkpoint found at '{}'".format(filename))
        raise ValueError(filename)

    if use_cuda:
        net.cuda()

    print('Extracting features ...')
    features, features_dr, labels = extract(dataloader, net, use_cuda)
    print('Done.\n')

    feat_path = os.path.join(datadir, args.feat)
    if args.h5:
        import h5py
        fo = h5py.File(feat_path + '.h5', 'w')
        fo.create_dataset('labels', data=labels)
        fo.create_dataset('Z', data=np.squeeze(features_dr))
        fo.create_dataset('data', data=np.squeeze(features))
        fo.close()
    else:
        fo = open(feat_path + '.pkl', 'wb')
        cPickle.dump({'labels': labels, 'Z': np.squeeze(features_dr), 'data': np.squeeze(features)}, fo, protocol=2)
        fo.close()
def test_net(sess, net, imdb, weights_filename, max_per_image=300, thresh=0.0001, vis=False):
    """Test a Fast R-CNN network on an image database."""
    num_images = len(imdb.image_index)
    # all detections are collected into:
    #    all_boxes[cls][image] = N x 5 array of detections in
    #    (x1, y1, x2, y2, score)
    all_boxes = [[[] for _ in range(num_images)]
                 for _ in range(imdb.num_classes)]

    output_dir = get_output_dir(imdb, weights_filename)
    # timers
    _t = {'im_detect' : Timer(), 'misc' : Timer()}

    if not cfg.TEST.HAS_RPN:
        roidb = imdb.roidb

    p = Pool(27)
    for i in range(num_images):
        # filter out any ground truth boxes
        if cfg.TEST.HAS_RPN:
            box_proposals = None
        else:
            # The roidb may contain ground-truth rois (for example, if the roidb
            # comes from the training or val split). We only want to evaluate
            # detection on the *non*-ground-truth rois. We select the rois that
            # have the gt_classes field set to 0, which means there's no
            # ground truth.
            box_proposals = roidb[i]['boxes'][roidb[i]['gt_classes'] == 0]

        im = cv2.imread(imdb.image_path_at(i))
        _t['im_detect'].tic()
        scores, boxes = im_detect(sess, net, im, box_proposals)
        _t['im_detect'].toc()

        _t['misc'].tic()
        if vis:
            image = im[:, :, (2, 1, 0)]
            plt.cla()
            plt.imshow(image)
        # collect per-class detections; NMS runs in parallel via the Pool below
        commands = []
        # skip j = 0, because it's the background class
        for j in range(1, imdb.num_classes):
            inds = np.where(scores[:, j] > thresh)[0]
            cls_scores = scores[inds, j]
            cls_boxes = boxes[inds, j*4:(j+1)*4]
            cls_dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])) \
                .astype(np.float32, copy=False)
            commands.append(cls_dets)

        nms_dets = p.map(psoft, commands)
        for j in range(1, imdb.num_classes):
            if vis:
                vis_detections(im, imdb.classes[j], nms_dets[j-1])
            all_boxes[j][i] = nms_dets[j-1]

        if vis:
           plt.show()

        # Limit to max_per_image detections *over all classes*
        if max_per_image > 0:
            image_scores = np.hstack([all_boxes[j][i][:, -1]
                                      for j in range(1, imdb.num_classes)])
            if len(image_scores) > max_per_image:
                image_thresh = np.sort(image_scores)[-max_per_image]
                for j in range(1, imdb.num_classes):
                    keep = np.where(all_boxes[j][i][:, -1] >= image_thresh)[0]
                    all_boxes[j][i] = all_boxes[j][i][keep, :]
        _t['misc'].toc()

        print('im_detect: {:d}/{:d} {:.3f}s {:.3f}s' \
              .format(i + 1, num_images, _t['im_detect'].average_time,
                      _t['misc'].average_time))

    det_file = os.path.join(output_dir, 'detections.pkl')
    with open(det_file, 'wb') as f:
        cPickle.dump(all_boxes, f, cPickle.HIGHEST_PROTOCOL)

    print('Evaluating detections')
    imdb.evaluate_detections(all_boxes, output_dir)
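This variant parallelizes per-class NMS with a multiprocessing Pool; a standalone sketch of the same pattern, with psoft stood in by a placeholder since its real definition is not shown here:

from multiprocessing import Pool
import numpy as np

def psoft(dets):
    # placeholder for the real (soft-)NMS; here: keep boxes scoring > 0.5
    return dets[dets[:, -1] > 0.5]

if __name__ == '__main__':
    commands = [np.random.rand(10, 5) for _ in range(20)]  # one N x 5 array per class
    with Pool(4) as p:
        nms_dets = p.map(psoft, commands)  # one worker call per class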
Example #17
def test_net(net, imdb, thresh=0.1, vis=False):
    data_reader = DataReader(**{'source': imdb.db_path})
    data_reader.Q_out = Queue(cfg.TEST.BATCH_SIZE)
    data_reader.start()
    num_images = data_reader.get_db_size()
    data_transformer = DataTransformer(**{'classes': imdb.classes})
    num_classes = imdb.num_classes
    all_boxes = [[[] for _ in xrange(num_images)]
                 for _ in xrange(imdb.num_classes)]
    imagenames = []
    recs = {}
    output_dir = get_output_dir(imdb, net)

    # timers
    _t = {'im_detect': Timer(), 'misc': Timer()}

    for i in xrange(0, num_images, cfg.TEST.BATCH_SIZE):
        ims = []
        for j in xrange(cfg.TEST.BATCH_SIZE):
            if i + j >= num_images: continue
            serialized = data_reader.Q_out.get()
            im = data_transformer.transform_image(serialized)
            filename, objects = data_transformer.transform_annos(serialized)
            imagenames.append(filename)
            recs[filename] = objects
            ims.append(im)
        _t['im_detect'].tic()
        batch_scores, batch_boxes = im_detect(net, ims)
        _t['im_detect'].toc()
        _t['misc'].tic()
        for item_id in xrange(len(batch_scores)):
            global_id = i + item_id
            scores = batch_scores[item_id]
            boxes = batch_boxes[item_id]
            for j in xrange(1, num_classes):
                inds = np.where(scores[:, j] > thresh)[0]
                if len(inds) < 1: continue
                cls_scores = scores[inds, j]
                cls_boxes = boxes[inds]
                pre_nms_inds = np.argsort(-cls_scores)[0:cfg.TEST.NMS_TOP_K]
                cls_scores = cls_scores[pre_nms_inds]
                cls_boxes = cls_boxes[pre_nms_inds]
                cls_dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])) \
                    .astype(np.float32, copy=False)
                keep = nms(cls_dets, cfg.TEST.NMS, force_cpu=True)
                cls_dets = cls_dets[keep, :]
                if vis:
                    vis_detections_all(ims[item_id], imdb.classes[j], cls_dets,
                                       thresh)
                all_boxes[j][global_id] = cls_dets
            if vis:
                cv2.imshow('SSD', ims[item_id])
                cv2.waitKey(0)

            # Limit to max_per_image detections *over all classes*
            if cfg.TEST.MAX_PER_IMAGE > 0:
                image_scores = np.array([], dtype=np.float32)
                for j in xrange(1, imdb.num_classes):
                    if len(all_boxes[j][global_id]) < 1: continue
                    image_scores = np.hstack(
                        [image_scores, all_boxes[j][global_id][:, -1]])
                if len(image_scores) > cfg.TEST.MAX_PER_IMAGE:
                    image_thresh = np.sort(
                        image_scores)[-cfg.TEST.MAX_PER_IMAGE]
                    for j in xrange(1, imdb.num_classes):
                        if len(all_boxes[j][global_id]) < 1: continue
                        keep = np.where(
                            all_boxes[j][global_id][:, -1] >= image_thresh)[0]
                        all_boxes[j][global_id] = all_boxes[j][global_id][
                            keep, :]

        _t['misc'].toc()
        print('im_detect: {:d}/{:d} {:.3f}s {:.3f}s' \
                .format(i + 1, num_images, _t['im_detect'].average_time,
                        _t['misc'].average_time))

    det_file = os.path.join(output_dir, 'detections.pkl')
    with open(det_file, 'wb') as f:
        cPickle.dump(all_boxes, f, cPickle.HIGHEST_PROTOCOL)

    print('Evaluating detections')
    imdb.evaluate_detections(all_boxes, output_dir, imagenames, recs)
Example #18
    # data_dir = 'data/ag_news/'
    data_dir = args.data_dir
    n_clusters = args.n_clusters
    use_cuda = torch.cuda.is_available()
    random_seed = args.seed
    recons_lam = args.recons_lam
    cluster_lam = args.cluster_lam
    batch_size = args.batch_size
    tol = args.tol
    lr = args.lr

    initialize_environment(random_seed=random_seed, use_cuda=use_cuda)

    feat_path = os.path.join(data_dir, cfg.TRAIN_TEXT_FEAT_FILE_NAME)
    feat, labels, ids = load_feat(feat_path)
    outputdir = get_output_dir(data_dir)
    net_filename = os.path.join(outputdir, cfg.PRETRAINED_FAE_FILENAME)
    checkpoint = torch.load(net_filename)
    net = extract_sdae_model(input_dim=cfg.INPUT_DIM,
                             hidden_dims=cfg.HIDDEN_DIMS)
    net.load_state_dict(checkpoint['state_dict'])
    if use_cuda:
        net.cuda()

    dcn = DCN(n_clusters,
              net,
              cfg.HIDDEN_DIMS[-1],
              lr=lr,
              tol=tol,
              batch_size=batch_size,
              recons_lam=recons_lam,
def main():
    args, cfg = parse_args()
    train_dir = get_output_dir(
        'default' if args.cfg_file is None else args.cfg_file)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

    print('Using Config:')
    pprint.pprint(cfg)

    tf.logging.set_verbosity(tf.logging.INFO)
    with tf.Graph().as_default():
        tf_global_step = tf.train.get_or_create_global_step()

        ######################
        # Select the dataset #
        ######################
        kwargs = {}
        if cfg.TEST.VIDEO_FRAMES_PER_VIDEO > 1:
            kwargs['num_samples'] = cfg.TEST.VIDEO_FRAMES_PER_VIDEO
            kwargs['modality'] = cfg.INPUT.VIDEO.MODALITY
            kwargs['split_id'] = cfg.INPUT.SPLIT_ID
        if args.dataset_list_dir is not None:
            kwargs['dataset_list_dir'] = args.dataset_list_dir
        elif cfg.DATASET_LIST_DIR != '':
            kwargs['dataset_list_dir'] = cfg.DATASET_LIST_DIR
        if cfg.INPUT_FILE_STYLE_LABEL != '':
            kwargs['input_file_style_label'] = cfg.INPUT_FILE_STYLE_LABEL
        dataset, num_pose_keypoints = dataset_factory.get_dataset(
            cfg.DATASET_NAME, cfg.TEST.DATASET_SPLIT_NAME, cfg.DATASET_DIR,
            **kwargs)

        ####################
        # Select the model #
        ####################
        network_fn = nets_factory.get_network_fn(
            cfg.MODEL_NAME,
            num_classes=dataset.num_classes,
            num_pose_keypoints=num_pose_keypoints,
            is_training=False,
            cfg=cfg)

        ##############################################################
        # Create a dataset provider that loads data from the dataset #
        ##############################################################
        provider = slim.dataset_data_provider.DatasetDataProvider(
            dataset,
            shuffle=False,
            num_epochs=1,
            common_queue_capacity=2 * cfg.TEST.BATCH_SIZE,
            common_queue_min=cfg.TEST.BATCH_SIZE)
        [image, action_label] = get_input(provider, cfg,
                                          ['image', 'action_label'])
        # label -= FLAGS.labels_offset

        #####################################
        # Select the preprocessing function #
        #####################################
        preprocessing_name = cfg.MODEL_NAME
        image_preprocessing_fn = preprocessing_factory.get_preprocessing(
            preprocessing_name, is_training=False)

        eval_image_size = cfg.TRAIN.IMAGE_SIZE or network_fn.default_image_size

        image = image_preprocessing_fn(image,
                                       eval_image_size,
                                       eval_image_size,
                                       resize_side_min=cfg.TRAIN.RESIZE_SIDE,
                                       resize_side_max=cfg.TRAIN.RESIZE_SIDE)

        # additional preprocessing as required
        if 'flips' in args.preprocs:
            tf.logging.info('Flipping all images while testing!')
            image = tf.stack(
                [tf.image.flip_left_right(el) for el in tf.unstack(image)])

        images, action_labels = tf.train.batch(
            [image, action_label],
            batch_size=cfg.TEST.BATCH_SIZE,
            # following is because if there are more, the order of batch can be
            # different due to different speed... so avoid that
            # http://stackoverflow.com/questions/35001027/does-batching-queue-tf-train-batch-not-preserve-order#comment57731040_35001027
            # num_threads=1 if args.save else cfg.NUM_PREPROCESSING_THREADS,
            # The line above was too unsafe: forgetting --save would silently
            # randomize the batch order, so num_threads=1 is now the default.
            # Better safe than sorry.
            num_threads=1,
            # A smaller final batch is only allowed in the single-frame case;
            # otherwise logits must be averaged over the frames, which needs
            # the first dimension to be fully defined.
            allow_smaller_final_batch=(cfg.TEST.VIDEO_FRAMES_PER_VIDEO == 1),
            capacity=5 * cfg.TEST.BATCH_SIZE)

        ####################
        # Define the model #
        ####################
        logits, end_points = network_fn(images)
        end_points['images'] = images

        if cfg.TEST.MOVING_AVERAGE_DECAY:
            variable_averages = tf.train.ExponentialMovingAverage(
                cfg.TEST.MOVING_AVERAGE_DECAY, tf_global_step)
            variables_to_restore = variable_averages.variables_to_restore(
                slim.get_model_variables())
            variables_to_restore[tf_global_step.op.name] = tf_global_step
        else:
            variables_to_restore = slim.get_variables_to_restore()

        predictions = tf.argmax(logits, 1)
        if cfg.TRAIN.LOSS_FN_ACTION.startswith('multi-label'):
            logits = tf.sigmoid(logits)
        else:
            logits = tf.nn.softmax(logits, -1)
        labels = tf.squeeze(action_labels)
        end_points['labels'] = labels

        # Define the metrics:
        names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({
            'Accuracy':
            slim.metrics.streaming_accuracy(predictions, labels),
            # 'Recall@5': slim.metrics.streaming_recall_at_k(
            #     logits, labels, 5),
        })

        # Print the summaries to screen.
        for name, value in names_to_values.items():
            summary_name = 'eval/%s' % name
            op = tf.summary.scalar(summary_name, value, collections=[])
            op = tf.Print(op, [value], summary_name)
            tf.add_to_collection(tf.GraphKeys.SUMMARIES, op)

        # TODO(sguada) use num_epochs=1
        if cfg.TEST.MAX_NUM_BATCHES:
            num_batches = cfg.TEST.MAX_NUM_BATCHES
        else:
            # This ensures that we make a single pass over all of the data.
            num_batches = math.ceil(dataset.num_samples /
                                    float(cfg.TEST.BATCH_SIZE))

        # just test the latest trained model
        checkpoint_path = cfg.TEST.CHECKPOINT_PATH or train_dir
        if tf.gfile.IsDirectory(checkpoint_path):
            checkpoint_path = tf.train.latest_checkpoint(checkpoint_path)
        checkpoint_step = int(checkpoint_path.split('-')[-1])

        tf.logging.info('Evaluating %s' % checkpoint_path)

        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        summary_writer = tf.summary.FileWriter(logdir=train_dir)

        if cfg.TEST.EVAL_METRIC == 'mAP' or args.save or args.ept:
            from tensorflow.python.training import supervisor
            from tensorflow.python.framework import ops
            import h5py
            saver = tf.train.Saver(variables_to_restore)
            sv = supervisor.Supervisor(graph=ops.get_default_graph(),
                                       logdir=None,
                                       summary_op=None,
                                       summary_writer=summary_writer,
                                       global_step=None,
                                       saver=None)
            all_labels = []
            end_points['logits'] = logits
            end_points_to_save = (args.ept or []) + ['logits']
            end_points_to_save = list(set(end_points_to_save))
            all_feats = dict([(ename, []) for ename in end_points_to_save])
            start_time = time.time()
            with sv.managed_session('',
                                    start_standard_services=False,
                                    config=config) as sess:
                saver.restore(sess, checkpoint_path)
                sv.start_queue_runners(sess)
                for j in tqdm(range(int(math.ceil(num_batches)))):
                    feats = sess.run([
                        action_labels,
                        [end_points[ename] for ename in end_points_to_save]
                    ])
                    all_labels.append(feats[0])
                    for ept_id, ename in enumerate(end_points_to_save):
                        all_feats[ename].append(feats[1][ept_id])
            print('Feature extraction took {:.1f}s'.format(time.time() - start_time))
            APs = []
            all_labels = np.concatenate(all_labels)
            if args.save or args.ept:
                res_outdir = os.path.join(train_dir, 'Features/')
                mkdir_p(res_outdir)
                outfpath = args.outfpath or os.path.join(
                    res_outdir, 'features_ckpt_{}_{}.h5'.format(
                        cfg.TEST.DATASET_SPLIT_NAME, checkpoint_step))
                print(
                    'Saving the features/logits/labels to {}'.format(outfpath))
                with h5py.File(outfpath, 'a') as fout:
                    for ename in end_points_to_save:
                        if ename in fout:
                            tf.logging.warning(
                                'Deleting {} from output HDF5 to write the '
                                'new features.'.format(ename))
                            del fout[ename]
                        if ename == 'labels':
                            feat_to_save = np.array(all_feats[ename])
                        else:
                            feat_to_save = np.concatenate(all_feats[ename])
                        try:
                            fout.create_dataset(ename,
                                                data=feat_to_save,
                                                compression='gzip',
                                                compression_opts=9)
                        except Exception:
                            # Drop into the debugger to resolve manually
                            # (e.g. an incompatible existing dataset), then
                            # continue.
                            pdb.set_trace()
                    if 'labels' in fout:
                        del fout['labels']
                    fout.create_dataset('labels',
                                        data=all_labels,
                                        compression='gzip',
                                        compression_opts=9)

            if args.ept:
                tf.logging.info(
                    '--ept was passed in, which indicates the script was '
                    'used for feature extraction; skipping evaluation.')
                return
            # Evaluation code
            all_logits = np.concatenate(all_feats['logits'])
            acc = np.mean(all_logits.argmax(axis=1) == all_labels)
            mAP = compute_map(all_logits, all_labels)[0]
            print('Mean AP: {}'.format(mAP))
            print('Accuracy: {}'.format(acc))
            summary_writer.add_summary(
                tf.Summary(value=[
                    tf.Summary.Value(
                        tag='mAP/{}'.format(cfg.TEST.DATASET_SPLIT_NAME),
                        simple_value=mAP)
                ]),
                global_step=checkpoint_step)
            summary_writer.add_summary(
                tf.Summary(value=[
                    tf.Summary.Value(
                        tag='Accuracy/{}'.format(cfg.TEST.DATASET_SPLIT_NAME),
                        simple_value=acc)
                ]),
                global_step=checkpoint_step)
        else:
            slim.evaluation.evaluate_once(
                master='',
                checkpoint_path=checkpoint_path,
                logdir=train_dir,
                num_evals=num_batches,
                eval_op=list(names_to_updates.values()),
                variables_to_restore=variables_to_restore,
                session_config=config)
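The mAP branch above relies on a compute_map helper defined elsewhere in the repository. As an illustration only, a minimal sketch of such a helper, assuming (N, C) score arrays, integer or multi-hot labels, and scikit-learn's average_precision_score (the actual implementation may differ):

import numpy as np
from sklearn.metrics import average_precision_score

def compute_map(logits, labels):
    # Mean average precision over classes (sketch).
    # logits: (N, C) scores; labels: (N,) class ids or (N, C) binary matrix.
    labels = np.asarray(labels)
    if labels.ndim == 1:
        # Convert integer class ids to a one-hot indicator matrix.
        onehot = np.zeros(logits.shape, dtype=np.int64)
        onehot[np.arange(len(labels)), labels] = 1
        labels = onehot
    aps = []
    for c in range(logits.shape[1]):
        if labels[:, c].sum() == 0:
            continue  # AP is undefined for classes with no positives
        aps.append(average_precision_score(labels[:, c], logits[:, c]))
    # Return mAP first, so that compute_map(...)[0] matches the usage above.
    return float(np.mean(aps)), aps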
Exemple #20
def main(args, net=None):
    global oldassignment

    datadir = get_data_dir(args.db)
    outputdir = get_output_dir(args.db)

    logger = None
    if args.tensorboard:
        # Create the folder for storing logs if it does not exist yet
        loggin_dir = os.path.join(outputdir, 'runs', 'DCC')
        if not os.path.exists(loggin_dir):
            os.makedirs(loggin_dir)
        loggin_dir = os.path.join(loggin_dir, '%s' % (args.id))
        if args.clean_log:
            remove_files_in_dir(loggin_dir)
        logger = Logger(loggin_dir)

    use_cuda = torch.cuda.is_available()

    # Set the seed for reproducing the results
    random.seed(args.manualSeed)
    np.random.seed(args.manualSeed)
    torch.manual_seed(args.manualSeed)
    if use_cuda:
        torch.cuda.manual_seed_all(args.manualSeed)
        torch.backends.cudnn.enabled = True
        cudnn.benchmark = True


    startepoch = 0
    kwargs = {'num_workers': 5, 'pin_memory': True} if use_cuda else {}

    # setting up dataset specific objects
    trainset = DCCPT_data(root=datadir, train=True, h5=args.h5)
    testset = DCCPT_data(root=datadir, train=False, h5=args.h5)
    numeval = len(trainset) + len(testset)

    # extracting training data from the pretrained.mat file
    data, labels, pairs, Z, sampweight = makeDCCinp(args)

    # For simplicity, I have created a placeholder for each dataset and model
    load_pretraining = True if net is None else False
    if net is None:
        net = dp.load_predefined_extract_net(args)

    # reshaping data for some datasets
    if args.db == 'cmnist':
        data = data.reshape((-1, 1, 28, 28))
    elif args.db == 'ccoil100':
        data = data.reshape((-1, 3, 128, 128))
    elif args.db == 'cytf':
        data = data.reshape((-1, 3, 55, 55))
    elif args.db == 'cyale':
        data = data.reshape((-1, 1, 168, 192))

    totalset = torch.utils.data.ConcatDataset([trainset, testset])

    # computing and initializing the hyperparams
    _sigma1, _sigma2, _lambda, _delta, _delta1, _delta2, lmdb, lmdb_data = computeHyperParams(pairs, Z)
    oldassignment = np.zeros(len(pairs))
    stopping_threshold = int(math.ceil(cfg.STOPPING_CRITERION * float(len(pairs))))

    # Create dataset and random batch sampler for Finetuning stage
    trainset = DCCFT_data(pairs, data, sampweight)
    batch_sampler = DCCSampler(trainset, shuffle=True, batch_size=args.batchsize)

    # copying model params from Pretrained (SDAE) weights file
    if load_pretraining:
        load_weights(args, outputdir, net)


    # creating objects for the loss functions; the U's are initialized to Z here
    # Criterion1 corresponds to the reconstruction loss
    criterion1 = DCCWeightedELoss(size_average=True)
    # Criterion2 corresponds to the sum of the pairwise and data loss terms
    criterion2 = DCCLoss(Z.shape[0], Z.shape[1], Z, size_average=True)

    if use_cuda:
        net.cuda()
        criterion1 = criterion1.cuda()
        criterion2 = criterion2.cuda()

    # setting up data loader for training and testing phase
    trainloader = torch.utils.data.DataLoader(trainset, batch_sampler=batch_sampler, **kwargs)
    testloader = torch.utils.data.DataLoader(totalset, batch_size=args.batchsize, shuffle=False, **kwargs)

    # setting up optimizer - the bias params should have twice the learning rate w.r.t. weights params
    bias_params = [p for n, p in net.named_parameters() if 'bias' in n]
    nonbias_params = [p for n, p in net.named_parameters() if 'bias' not in n]

    optimizer = optim.Adam([{'params': bias_params, 'lr': 2*args.lr},
                            {'params': nonbias_params},
                            {'params': criterion2.parameters(), 'lr': args.lr},
                            ], lr=args.lr, betas=(0.99, 0.999))

    # this is needed for WARM START
    if args.resume:
        filename = outputdir+'/FTcheckpoint_%d.pth.tar' % args.level
        if os.path.isfile(filename):
            print("==> loading checkpoint '{}'".format(filename))
            checkpoint = torch.load(filename)
            net.load_state_dict(checkpoint['state_dict'])
            criterion2.load_state_dict(checkpoint['criterion_state_dict'])
            startepoch = checkpoint['epoch']
            optimizer.load_state_dict(checkpoint['optimizer'])
            _sigma1 = checkpoint['sigma1']
            _sigma2 = checkpoint['sigma2']
            _lambda = checkpoint['lambda']
            _delta = checkpoint['delta']
            _delta1 = checkpoint['delta1']
            _delta2 = checkpoint['delta2']
        else:
            print("==> no checkpoint found at '{}'".format(filename))
            raise ValueError

    # This is the actual Algorithm
    flag = 0
    for epoch in range(startepoch, args.nepoch):
        if logger:
            logger.log_value('sigma1', _sigma1, epoch)
            logger.log_value('sigma2', _sigma2, epoch)
            logger.log_value('lambda', _lambda, epoch)

        train(trainloader, net, optimizer, criterion1, criterion2, epoch, use_cuda, _sigma1, _sigma2, _lambda, logger)
        Z, U, change_in_assign, assignment = test(testloader, net, criterion2, epoch, use_cuda, _delta, pairs, numeval, flag, logger)

        if flag:
            # DCC keeps running as long as the change in label assignment
            # stays below the threshold. Note: the change always exceeds the
            # threshold in the very first epoch after the flag is set, so the
            # condition must trigger several times before training stops.
            if change_in_assign > stopping_threshold:
                flag += 1
            if flag == 4:
                break

        if((epoch+1) % args.M == 0):
            _sigma1 = max(_delta1, _sigma1 / 2)
            _sigma2 = max(_delta2, _sigma2 / 2)
            if _sigma2 == _delta2 and flag == 0:
                # Start checking for stopping criterion
                flag = 1

        # Save checkpoint
        index = (epoch // args.M) * args.M
        save_checkpoint({'epoch': epoch+1,
                         'state_dict': net.state_dict(),
                         'criterion_state_dict': criterion2.state_dict(),
                         'optimizer': optimizer.state_dict(),
                         'sigma1': _sigma1,
                         'sigma2': _sigma2,
                         'lambda': _lambda,
                         'delta': _delta,
                         'delta1': _delta1,
                         'delta2': _delta2,
                         }, index, filename=outputdir)

    output = {'Z': Z, 'U': U, 'gtlabels': labels, 'w': pairs, 'cluster': assignment}
    sio.savemat(os.path.join(outputdir, 'features'), output)
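Both DCC entry points above construct their optimizer the same way: the parameters are split by name so that biases train at twice the base learning rate. A minimal, self-contained sketch of that pattern (make_optimizer is a hypothetical name, not part of the original code):

import torch.nn as nn
import torch.optim as optim

def make_optimizer(model, lr):
    # Biases get twice the base learning rate; weights use the base rate.
    bias_params = [p for n, p in model.named_parameters() if 'bias' in n]
    weight_params = [p for n, p in model.named_parameters() if 'bias' not in n]
    return optim.Adam([{'params': bias_params, 'lr': 2 * lr},
                       {'params': weight_params}],
                      lr=lr, betas=(0.99, 0.999))

# Usage on any nn.Module:
net = nn.Linear(10, 5)
optimizer = make_optimizer(net, lr=1e-3)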
Exemple #21
def main():
    global args, oldassignment

    args = parser.parse_args()
    datadir = get_data_dir(args.db)
    outputdir = get_output_dir(args.db)

    if args.tensorboard:
        # Create the folder for storing logs if it does not exist yet
        loggin_dir = os.path.join(outputdir, 'runs', 'DCC')
        if not os.path.exists(loggin_dir):
            os.makedirs(loggin_dir)
        configure(os.path.join(loggin_dir, '%s' % (args.id)))

    use_cuda = torch.cuda.is_available()

    # Set the seed for reproducing the results
    random.seed(args.manualSeed)
    np.random.seed(args.manualSeed)
    torch.manual_seed(args.manualSeed)
    if use_cuda:
        torch.cuda.manual_seed_all(args.manualSeed)
        torch.backends.cudnn.enabled = True
        cudnn.benchmark = True

    reluslope = 0.0
    startepoch = 0
    kwargs = {'num_workers': 5, 'pin_memory': True} if use_cuda else {}

    # setting up dataset specific objects
    trainset = DCCPT_data(root=datadir, train=True, h5=args.h5)
    testset = DCCPT_data(root=datadir, train=False, h5=args.h5)

    numeval = len(trainset) + len(testset)


    # For simplicity, I have created a placeholder for each dataset and model
    if args.db == 'mnist':
        net_s = extract_sdae_mnist(slope=reluslope, dim=args.dim)
        net_z = extract_sdae_mnist(slope=reluslope, dim=args.dim)
    else:
        print("db not supported: '{}'".format(args.db))
        raise ValueError

    totalset = torch.utils.data.ConcatDataset([trainset, testset])

    # extracting training data from the pretrained.mat file
    data, labels, pairs, Z, sampweight = makeDCCinp(args)

    # computing and initializing the hyperparams
    _sigma1, _sigma2, _lambda, _delta, _delta1, _delta2, lmdb, lmdb_data = computeHyperParams(pairs, Z, args.step)
    oldassignment = np.zeros(len(pairs))
    stopping_threshold = int(math.ceil(cfg.STOPPING_CRITERION * float(len(pairs))))

    # Create dataset and random batch sampler for Finetuning stage
    trainset = DCCFT_data(pairs, data, sampweight)
    batch_sampler = DCCSampler(trainset, shuffle=True, batch_size=args.batchsize)

    # setting up data loader for training and testing phase
    trainloader = torch.utils.data.DataLoader(trainset, batch_sampler=batch_sampler, **kwargs)
    testloader = torch.utils.data.DataLoader(totalset, batch_size=args.batchsize, shuffle=False, **kwargs)


    if args.step == 1:

        pretraining_filename = os.path.join(outputdir, args.torchmodel_pretraining)
        if os.path.isfile(pretraining_filename):
            print("==> loading params from pretraining checkpoint '{}'".format(pretraining_filename))
            pretraining_checkpoint = torch.load(pretraining_filename)
        else:
            print("==> no pretraining checkpoint found at '{}'".format(pretraining_filename))
            raise ValueError


        # setting up optimizer - the bias params should have twice the learning rate w.r.t. weights params
        bias_params = [p for n, p in net_s.named_parameters() if 'bias' in n]
        nonbias_params = [p for n, p in net_s.named_parameters() if 'bias' not in n]

        # copying model params from Pretrained (SDAE) weights file
        net_s.load_state_dict(pretraining_checkpoint['state_dict'])

        criterion_sc = DCCLoss(Z.shape[0], Z.shape[1], Z, size_average=True)
        optimizer_sc = optim.Adam([{'params': bias_params, 'lr': 2*args.lr},
                            {'params': nonbias_params},
                            {'params': criterion_sc.parameters(), 'lr': args.lr},
                            ], lr=args.lr, betas=(0.99, 0.999))
        criterion_rec = DCCWeightedELoss(size_average=True)


        if use_cuda:
            net_s.cuda()
            criterion_sc = criterion_sc.cuda()
            criterion_rec = criterion_rec.cuda()

        # this is needed for WARM START
        if args.resume:
            filename = outputdir+'/FTcheckpoint_%d.pth.tar' % args.level
            if os.path.isfile(filename):
                print("==> loading checkpoint '{}'".format(filename))
                checkpoint = torch.load(filename)
                net_s.load_state_dict(checkpoint['state_dict_s'])
                criterion_sc.load_state_dict(checkpoint['criterion_state_dict_sc'])
                startepoch = checkpoint['epoch']
                optimizer_sc.load_state_dict(checkpoint['optimizer_sc'])
                _sigma1 = checkpoint['sigma1']
                _sigma2 = checkpoint['sigma2']
                _lambda = checkpoint['lambda']
                _delta = checkpoint['delta']
                _delta1 = checkpoint['delta1']
                _delta2 = checkpoint['delta2']
            else:
                print("==> no checkpoint found at '{}'".format(filename))
                raise ValueError

        # This is the actual Algorithm
        flag = 0
        for epoch in range(startepoch, args.nepoch):
            print('sigma1', _sigma1, epoch)
            print('sigma2', _sigma2, epoch)
            print('lambda', _lambda, epoch)
            if args.tensorboard:
                log_value('sigma1', _sigma1, epoch)
                log_value('sigma2', _sigma2, epoch)
                log_value('lambda', _lambda, epoch)

            train_step_1(trainloader, net_s, optimizer_sc, criterion_rec, criterion_sc, epoch, use_cuda, _sigma1, _sigma2, _lambda)
            Z, U, change_in_assign, assignment = test(testloader, net_s, criterion_sc, epoch, use_cuda, _delta, pairs, numeval, flag)

            if flag:
                # DCC keeps running as long as the change in label assignment
                # stays below the threshold. Note: the change always exceeds
                # the threshold in the very first epoch after the flag is set,
                # so the condition must trigger several times before stopping.
                if change_in_assign > stopping_threshold:
                    flag += 1

            if((epoch+1) % args.M == 0):
                _sigma1 = max(_delta1, _sigma1 / 2)
                _sigma2 = max(_delta2, _sigma2 / 2)
                if _sigma2 == _delta2 and flag == 0:
                    # Start checking for stopping criterion
                    flag = 1

            # Save checkpoint
            index = (epoch // args.M) * args.M
            save_checkpoint({'epoch': epoch+1,
                             'state_dict_s': net_s.state_dict(),
                             'criterion_state_dict_sc': criterion_sc.state_dict(),
                             'optimizer_sc': optimizer_sc.state_dict(),
                             'sigma1': _sigma1,
                             'sigma2': _sigma2,
                             'lambda': _lambda,
                             'delta': _delta,
                             'delta1': _delta1,
                             'delta2': _delta2,
                             }, index, filename=outputdir)

            sio.savemat(os.path.join(outputdir, 'features_s'), {'Z': Z, 'U': U, 'gtlabels': labels, 'w': pairs, 'cluster': assignment})

    elif args.step == 2:
        filename = os.path.join(outputdir, args.torchmodel)
        if os.path.isfile(filename):
            print("==> loading params from checkpoint '{}'".format(filename))
            checkpoint = torch.load(filename)
        else:
            print("==> no checkpoint found at '{}'".format(filename))
            raise ValueError

        # copying model params of s encoder from step 1
        net_s.load_state_dict(checkpoint['state_dict_s'])

        # freezing net_s
        for param in net_s.parameters():
            param.requires_grad = False

        net_d = DecoderNet(1)
        criterion_d = nn.MSELoss()

        # setting up optimizer - the bias params should have twice the learning rate w.r.t. weights params
        bias_params = [p for n, p in net_z.named_parameters() if 'bias' in n]
        nonbias_params = [p for n, p in net_z.named_parameters() if 'bias' not in n]

        criterion_zc = DCCLoss(Z.shape[0], Z.shape[1], Z, size_average=True)
        optimizer_zc = optim.Adam([{'params': bias_params, 'lr': 2*args.lr},
                            {'params': nonbias_params},
                            {'params': criterion_zc.parameters(), 'lr': args.lr},
                            ], lr=args.lr, betas=(0.99, 0.999))
        optimizer_d = torch.optim.Adam(net_d.parameters(), lr=0.001)
        criterion_rec = DCCWeightedELoss(size_average=True)
        if use_cuda:
            net_d.cuda()
            net_s.cuda()
            net_z.cuda()
            criterion_zc = criterion_zc.cuda()
            criterion_d = criterion_d.cuda()
            criterion_rec = criterion_rec.cuda()

        flag = 0
        for epoch in range(startepoch, args.nepoch):
            print('sigma1', _sigma1, epoch)
            print('sigma2', _sigma2, epoch)
            print('lambda', _lambda, epoch)
            if args.tensorboard:
                log_value('sigma1', _sigma1, epoch)
                log_value('sigma2', _sigma2, epoch)
                log_value('lambda', _lambda, epoch)

            train_step_2(trainloader, net_s, net_z, net_d, optimizer_zc, optimizer_d, criterion_rec, criterion_zc, criterion_d, epoch, use_cuda, _sigma1, _sigma2, _lambda)
            Z, U, change_in_assign, assignment = test(testloader, net_z, criterion_zc, epoch, use_cuda, _delta, pairs, numeval, flag)


            if flag:
                # DCC keeps running as long as the change in label assignment
                # stays below the threshold. Note: the change always exceeds
                # the threshold in the very first epoch after the flag is set,
                # so the condition must trigger several times before stopping.
                if change_in_assign > stopping_threshold:
                    flag += 1

            if((epoch+1) % args.M == 0):
                _sigma1 = max(_delta1, _sigma1 / 2)
                _sigma2 = max(_delta2, _sigma2 / 2)
                if _sigma2 == _delta2 and flag == 0:
                    # Start checking for stopping criterion
                    flag = 1

            # Save checkpoint
            index = (epoch // args.M) * args.M
            save_checkpoint({'epoch': epoch+1,
                             'state_dict_s': net_s.state_dict(),
                             'state_dict_z': net_z.state_dict(),
                             'state_dict_d': net_d.state_dict(),
                             'criterion_state_dict_zc': criterion_zc.state_dict(),
                             'optimizer_zc': optimizer_zc.state_dict(),
                             'sigma1': _sigma1,
                             'sigma2': _sigma2,
                             'lambda': _lambda,
                             'delta': _delta,
                             'delta1': _delta1,
                             'delta2': _delta2,
                             }, index, filename=outputdir)

        sio.savemat(os.path.join(outputdir, 'features_z'), {'Z': Z, 'U': U, 'gtlabels': labels, 'w': pairs, 'cluster': assignment})

    else:
        raise ValueError("step not recognized!")
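All three training loops above share the same annealing schedule: every args.M epochs the sigmas are halved down to their delta floors, and once _sigma2 reaches _delta2 the stopping check is armed. The same logic, isolated as a sketch with hypothetical names:

def anneal_sigmas(sigma1, sigma2, delta1, delta2, epoch, M, flag):
    # Halve the sigmas every M epochs, but never below their floors.
    if (epoch + 1) % M == 0:
        sigma1 = max(delta1, sigma1 / 2)
        sigma2 = max(delta2, sigma2 / 2)
        # Once sigma2 bottoms out, arm the stopping criterion (flag = 1),
        # mirroring the training loops above.
        if sigma2 == delta2 and flag == 0:
            flag = 1
    return sigma1, sigma2, flag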