Example #1
    def __init__(self, args):
        super(groupAgent, self).__init__()

        self.args = args
        self.logger = logging.getLogger()
        # initialize my counters
        self.current_epoch = 0

        if self.args.mode == 'eval':
            pass  # training-only setup in the else branch is skipped in eval mode
        else:

            # initialize tensorboard writer
            self.summary_writer = SummaryWriter(self.args.tensorboard_dir)

            # Create an instance of the data loader
            if self.args.debug:
                _, _, _, self.train_loader = data_loader(
                    debug_data_folder=self.args.validation_data_folder,
                    num_workers=self.args.num_workers,
                    aug_type=self.args.aug_type)
                self.validation_loader = self.train_loader
                self.test_loader = self.train_loader
            else:
                self.train_loader, self.validation_loader, self.test_loader, _ = data_loader(
                    train_data_folder=self.args.train_data_folder,
                    validation_data_folder=self.args.validation_data_folder,
                    test_data_folder=self.args.test_data_folder,
                    num_workers=self.args.num_workers,
                    aug_type=self.args.aug_type)

            # Create an instance of the model
            self.model = model.regnet.RegNet_single(
                dim=self.args.dim,
                n=self.args.num_images_per_group,
                scale=self.args.scale,
                depth=self.args.depth,
                initial_channels=self.args.initial_channels,
                normalization=self.args.normalization).to(device)

            self.logger.info(self.model)
            self.logger.info(
                f"Total Trainable Params: {count_parameters(self.model)}")

            # Create an instance of the loss
            self.ncc_loss = model.loss.LNCC(
                self.args.dim, self.args.ncc_window_size).to(device)
            self.spatial_transform = SpatialTransformer(dim=self.args.dim)

            # Create an instance of the optimizer
            self.optimizer = torch.optim.Adam(self.model.parameters(),
                                              lr=self.args.learning_rate)

            # Load the model from the latest checkpoint; if none is found, start from scratch.
            self.load_checkpoint()
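
The constructor ends with self.load_checkpoint(), whose body is not shown. A minimal sketch of such a method, assuming os and torch are imported and that a checkpoint_dir argument and a conventional checkpoint dict exist (the attribute and key names here are assumptions, not from the source):

    def load_checkpoint(self):
        # Hypothetical path; the real attribute name is not shown above.
        path = os.path.join(self.args.checkpoint_dir, 'checkpoint.pth.tar')
        if not os.path.isfile(path):
            self.logger.info('No checkpoint found, starting from scratch.')
            return
        checkpoint = torch.load(path, map_location='cpu')
        self.current_epoch = checkpoint['epoch']
        self.model.load_state_dict(checkpoint['state_dict'])
        self.optimizer.load_state_dict(checkpoint['optimizer'])
        self.logger.info(f"Resumed from epoch {self.current_epoch}.")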
Example #2
def main():

    args = easydict.EasyDict({
        "cuda": True,
        "data_place": "./dataset",
        "model_type": "Profile",
    })

    # Get input images
    n = 20  # number of images to show
    #images, id_labels, Nd, channel_num = data_loader(args.data_place, args.model_type)
    #jpg_image = convert_image(images)
    #image_list = np.random.randint(0,len(images), (1,n))[0]

    # Load images
    protocol_dir = './dataset/cfp-dataset/Protocol/Split'
    pair_type = 'FP'
    images_f, id_labels_f, Nd, channel_num = data_loader(
        args.data_place, 'Front')
    images_p, id_labels_p, Nd, channel_num = data_loader(
        args.data_place, 'Profile')

    # Make paired data
    final_images_p, final_images_f = make_pair_data(images_p, images_f,
                                                    protocol_dir, pair_type)

    model_P = Model_P.Generator(50, 3)
    path_to_model_P = './snapshot/Model_P/l1_loss_remove_emb/epoch24_G.pt'
    # torch.load on a fully pickled model returns the module itself,
    # replacing the Generator instance constructed above.
    model_P = torch.load(path_to_model_P)

    # Generate images
    transformed_data = FaceIdPoseDataset_v2(final_images_p,
                                            final_images_f,
                                            final_images_p,
                                            final_images_p,
                                            transforms=transforms.Compose([
                                                Resize((110, 110)),
                                                RandomCrop((96, 96))
                                            ]))
    dataloader = DataLoader(transformed_data,
                            batch_size=32,
                            shuffle=False,
                            pin_memory=True)
    if args.cuda:
        model_P.cuda()
    Nz = 50
    model_P.eval()
    gen_img = []
    for i, batch_img in enumerate(dataloader):
        input_img = batch_img[0]
        #print(input_img)
        front_img = batch_img[1]
        generated_imgs = Generate_Image_v2(input_img, model_P, Nz, args)
        show_image(input_img, front_img, generated_imgs, i)
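
Note that torch.load here only deserializes if the original Model_P class is importable, and the freshly constructed Generator is discarded. A more portable pattern, assuming the checkpoint had instead been saved as a state dict (the .pt file above may not be), would be:

    model_P = Model_P.Generator(50, 3)  # Nz=50, 3 channels, as above
    state = torch.load(path_to_model_P, map_location='cpu')
    model_P.load_state_dict(state)  # works only for state-dict checkpoints
    model_P.eval()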
Example #3
def main():
    args = parse_args()

    infos = [
        ('cfp_split_10/epoch1000_G.pt', 'cfp_split_10/epoch1000_D.pt', '10'),
        ('cfp_split_09/epoch1000_G.pt', 'cfp_split_09/epoch1000_D.pt', '09'),
        ('cfp_split_08/epoch1000_G.pt', 'cfp_split_08/epoch1000_D.pt', '08'),
        ('cfp_split_07/epoch729_G.pt', 'cfp_split_07/epoch729_D.pt', '07'),
        ('cfp_split_06/epoch1000_G.pt', 'cfp_split_06/epoch1000_D.pt', '06'),
        ('cfp_split_05/epoch1000_G.pt', 'cfp_split_05/epoch1000_D.pt', '05'),
        ('cfp_split_04/epoch1000_G.pt', 'cfp_split_04/epoch1000_D.pt', '04'),
        ('cfp_split_03/epoch1000_G.pt', 'cfp_split_03/epoch1000_D.pt', '03'),
        ('cfp_split_02/epoch1000_G.pt', 'cfp_split_02/epoch1000_D.pt', '02'),
        ('cfp_split_01/epoch1000_G.pt', 'cfp_split_01/epoch1000_D.pt', '01')
    ]
    model_name = 'CFP'
    args.save_dir = os.path.join(args.save_dir, 'Evaluate', model_name)
    os.makedirs(args.save_dir, exist_ok=True)
    G_model = Model_P.Generator(50, 3)
    D_model = Model_P.Discriminator(500, 3)
    frontal_img, id_label_f, Nd, channel_num = data_loader(
        args.data_place, 'Front')
    profile_img, id_label_p, Nd, channel_num = data_loader(
        args.data_place, 'Profile')
    data_num_f = len(frontal_img)
    data_num_p = len(profile_img)

    for info in infos:
        G_model_path, D_model_path, split_id = info
        G_model_path = os.path.join(args.model_dir, G_model_path)
        D_model_path = os.path.join(args.model_dir, D_model_path)
        # torch.load returns the full pickled models, replacing the instances above
        G_model = torch.load(G_model_path)
        D_model = torch.load(D_model_path)
        #extract_feat(G_model, D_model, frontal_img, id_label_f, profile_img, id_label_p, data_num_f, data_num_p, split_id, args)

    eval_roc_main(args.save_dir, args.save_dir)
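
With the extract_feat call commented out, the loop above only loads each split's weights. If these GPU-trained checkpoints were to be loaded on a CPU-only machine, torch.load needs a map_location; a minimal sketch:

        G_model = torch.load(G_model_path, map_location=torch.device('cpu'))
        D_model = torch.load(D_model_path, map_location=torch.device('cpu'))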
Example #4
def main():
	args = parse_args()

	subdir = args.model_type if args.model_type in ('Front', 'Profile') else 'Transfer'
	args.save_dir = os.path.join(args.save_dir, subdir,
	                             datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))
	os.makedirs(args.save_dir)

	print("Parameters:")
	for attr, value in sorted(args.__dict__.items()):
		text ="\t{}={}\n".format(attr.upper(), value)
		print(text)
		with open('{}/Parameters.txt'.format(args.save_dir),'a') as f:
			f.write(text)

	if args.model_type != 'Transfer':
		#input data
		images, id_labels, Nd, channel_num = data_loader(args.data_place, args.model_type)
		D = single_model.Discriminator(Nd, channel_num)
		G = single_model.Generator(channel_num)
		train_single_GAN(images, id_labels, Nd, D, G, args)
	else:
		#initialize G and D for transfer block
		front_feat_file = './dataset/cfp-dataset/front_feat.bin'
		profile_feat_file = './dataset/cfp-dataset/profile_feat.bin'
		protocol_dir = './dataset/cfp-dataset/Protocol/Split'
		pair_type = 'FP'
		
		final_front_pair, final_profile_pair, final_front_id, final_profile_id = \
		read_bin_cfp(front_feat_file, profile_feat_file, protocol_dir, pair_type)

		# print(np.shape(final_front_pair), np.shape(final_profile_pair))
		# print(np.count_nonzero(final_front_id - final_profile_id))
		Nd = 500
		D = transfer_block.Discriminator(320,1)
		#D = transfer_block.Discriminator(320,Nd + 1)
		G = transfer_block.Generator(320, 320)
		train_transfer_block(final_front_pair, final_profile_pair, final_front_id, Nd, D, G, args)
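
parse_args is not shown. A minimal sketch consistent with the attributes this main() reads (flag names and defaults are assumptions; the train functions likely consume more flags):

import argparse

def parse_args():
	parser = argparse.ArgumentParser()
	parser.add_argument('--model_type', choices=['Front', 'Profile', 'Transfer'], default='Front')
	parser.add_argument('--save_dir', default='./snapshot')
	parser.add_argument('--data_place', default='./dataset')
	return parser.parse_args()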
Example #5
    user_metapath = tf.placeholder(name="user_metapath", shape=[None, 3], dtype=tf.int32)
    item_metapath = tf.placeholder(name="item_metapath", shape=[None, 3], dtype=tf.int32)
    label = tf.placeholder(name="label", shape=[None, 5], dtype=tf.int32)
    is_training = tf.placeholder(name="train_mode", shape=[], dtype=tf.bool)

    model = BaseMetapathModel(user_num=args.user_num, item_num=args.item_num)
    output = model.forward(user_metapath=user_metapath, item_metapath=item_metapath, is_training=is_training)

    greg_loss = 5e-5 * tf.reduce_mean([tf.nn.l2_loss(x) for x in tf.trainable_variables()])
    label_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=output, labels=label))
    total_loss = label_loss + greg_loss
    total_loss = label_loss  # NOTE: this overwrite drops the L2 term defined above
    train_option = tf.train.AdamOptimizer(args.learning_rate).minimize(total_loss)

    all_meta_path = get_input_metapath_instance_list()
    train_iterator = data_loader(input_metapath_instance_list=all_meta_path, mode="train", batch_size=args.batch_size)
    # valid_iterator = valid_data_loader(input_metapath_instance_list=all_meta_path, mode="test",
    #                                    batch_size=args.batch_size)
    # test_iterator = valid_data_loader(input_metapath_instance_list=all_meta_path, mode="val",
    #                                   batch_size=args.batch_size)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    with tf.Session(config=config) as session:
        session.run(tf.global_variables_initializer())
        train_loss_list = list()
        for step in range(args.training_step):

            train_batch_user_metapaths, train_batch_item_metapaths, train_batch_labels = next(train_iterator)
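
The snippet is truncated mid-loop. Under the placeholders defined above, one training step would plausibly look like this sketch (not from the source):

            _, step_loss = session.run(
                [train_option, total_loss],
                feed_dict={
                    user_metapath: train_batch_user_metapaths,
                    item_metapath: train_batch_item_metapaths,
                    label: train_batch_labels,
                    is_training: True,
                })
            train_loss_list.append(step_loss)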
Example #6
def main_hin(args):
    '''
        The main function to run. (train / val / test)
    '''
    data_dir = args.data_dir
    dataset_name = args.dataset_name
    assert dataset_name.lower() in ['acm', 'imdb', 'dblp']

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    num_nodes, num_classes, node_features, data_split, adjs, \
    edge_list, edge_type, node_type_i, node_type_j, n_edge_type = \
        data_loader(data_dir, dataset_name, device)

    trn_node, trn_label, val_node, val_label, tst_node, tst_label = data_split
    adj_GTN, adj_graphsage, nx_graph = adjs

    in_feats = node_features.size(1)
    g = nx_graph.to_directed()

    if args.self_loop:
        g.remove_edges_from(nx.selfloop_edges(g))
        g.add_edges_from(zip(g.nodes(), g.nodes()))
    dgl_g = DGLGraph(g)
    n_edges = dgl_g.number_of_edges()
    degs = dgl_g.in_degrees().float()
    norm = torch.pow(degs, -0.5)
    norm[torch.isinf(norm)] = 0
    norm = norm.to(device)
    dgl_g.ndata['norm'] = norm.unsqueeze(1)

    model_args = {
        "g"         : dgl_g,
        "in_feats"  : in_feats,
        "n_classes" : num_classes,
        "n_hidden"  : args.hidden,
        "dropout"   : args.dropout,
        "activation": F.relu,
    }

    gen_type   = args.gen_type
    post_type  = args.post_type

    gen_config = copy.deepcopy(model_args)
    gen_config["type"]               = gen_type
    gen_config["neg_ratio"]          = args.neg_ratio
    gen_config['hidden_x']           = args.hidden_x
    gen_config['aspect_embed_size']  = args.aspect_embed_size
    gen_config['nx_g']               = g
    gen_config['n_edge_type']        = n_edge_type
    gen_config['n_layers']           = args.n_gnn_layers

    post_config = copy.deepcopy(model_args)
    post_config["type"]              = post_type
    post_config['aspect_embed_size'] = args.aspect_embed_size
    
    if post_type == 'graphsage':
        post_config['n_layers']        = args.n_gnn_layers
        post_config['aggregator_type'] = args.aggregator_type
    elif post_type == 'a2gnn':
        post_config['a2gnn_num_layer'] = args.a2gnn_num_layer
        post_config['args']            = args
    else:
        post_config['n_layers']        = args.n_gnn_layers

    model = GenGNN(gen_config, post_config).to(device)
    optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
    scheduler = lr_scheduler.MultiStepLR(optimizer, [80, 120, 150], 0.6)

    neib_sampler = NeibSampler(nx_graph, args.n_nb).to(device)

    best_val_macro_f1 = -1
    best_tst_macro_f1 = -1
    best_val_micro_f1 = -1
    best_tst_micro_f1 = -1
    for epoch in range(args.n_epochs):
        is_new_best = False
        model.train()
        aspect_embed, logits = model.cal_post(node_features, neib_sampler)
        post_aspect = F.log_softmax(aspect_embed, dim=1)
        y_log_prob  = F.log_softmax(logits, dim=1)
        nll_generative = model.gen.nll_generative(node_features,
                                                  post_aspect,
                                                  trn_node,
                                                  trn_label)
        mask_rate = args.mask_rate
        ss_loss = model.gen.self_supervised(node_features,
                                            aspect_embed,
                                            edge_type,
                                            mask_rate)
        nll_discriminative = F.nll_loss(y_log_prob[trn_node], trn_label)
        trn_loss = args.lamda * (nll_generative + ss_loss) + nll_discriminative

        trn_label_ = trn_label.cpu().numpy()
        val_label_ = val_label.cpu().numpy()
        tst_label_ = tst_label.cpu().numpy()
        trn_pred = logits[trn_node].cpu().detach().numpy().argmax(axis=1)
        val_pred = logits[val_node].cpu().detach().numpy().argmax(axis=1)
        tst_pred = logits[tst_node].cpu().detach().numpy().argmax(axis=1)

        trn_macro_f1 = f1_score(trn_label_, trn_pred, average="macro")
        trn_micro_f1 = f1_score(trn_label_, trn_pred, average="micro")
        val_macro_f1 = f1_score(val_label_, val_pred, average='macro')
        val_micro_f1 = f1_score(val_label_, val_pred, average='micro')
        tst_macro_f1 = f1_score(tst_label_, tst_pred, average="macro")
        tst_micro_f1 = f1_score(tst_label_, tst_pred, average="micro")

        if val_macro_f1 > best_val_macro_f1:
            is_new_best = True
            best_val_macro_f1 = val_macro_f1
            best_tst_macro_f1 = tst_macro_f1
        if val_micro_f1 > best_val_micro_f1:
            is_new_best = True
            best_val_micro_f1 = val_micro_f1
            best_tst_micro_f1 = tst_micro_f1

        optimizer.zero_grad()
        trn_loss.backward()
        optimizer.step()
        scheduler.step()

        if is_new_best:
            cprint('epoch:{:>3d}/{}  trn_loss: {:.5f}  trn_macro_f1: {:.4f} | val_macro_f1: {:.4f} | tst_macro_f1: {:.4f}'.format(
                    epoch, args.n_epochs, trn_loss.item(), trn_macro_f1, val_macro_f1, tst_macro_f1), 'green')
        else:
            print('epoch:{:>3d}/{}  trn_loss: {:.5f}  trn_macro_f1: {:.4f} | val_macro_f1: {:.4f} | tst_macro_f1: {:.4f}'.format(
                   epoch, args.n_epochs, trn_loss.item(), trn_macro_f1, val_macro_f1, tst_macro_f1))

    return best_tst_macro_f1, best_tst_micro_f1
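
The F1 scores above are computed from the same forward pass that produces the training loss, so dropout and other training-mode behavior affect them. A sketch of a separated evaluation pass, assuming the same cal_post interface:

        model.eval()
        with torch.no_grad():
            _, eval_logits = model.cal_post(node_features, neib_sampler)
        val_pred = eval_logits[val_node].cpu().numpy().argmax(axis=1)
        model.train()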
Example #7
def main():
	args = parse_args()


	front_feat_dir = os.path.join(args.save_dir, 'front_feat.bin')
	model_G_path_f = os.path.join(args.model_dir, 'Front', '2018-05-10_16-51-09', 'epoch1000_G.pt')
	#model_D_path = os.path.join(args.model_dir, 'Front', '2018-05-10_16-51-09', 'epoch1000_D.pt')

	profile_feat_dir = os.path.join(args.save_dir, 'profile_feat.bin')
	model_G_path_p = os.path.join(args.model_dir, 'Profile', '2018-05-11_09-09-05', 'epoch1000_G.pt')
	#model_D_path = os.path.join(args.model_dir, 'Front', '2018-05-10_16-51-09', 'epoch1000_D.pt')

	#prepare input image
	images_f, id_labels_f, Nd, channel_num = data_loader(args.data_dir, 'Front')
	images_p, id_labels_p, Nd, channel_num = data_loader(args.data_dir, 'Profile')

	image_data_f = FaceIdPoseDataset(images_f, id_labels_f,
			transforms=transforms.Compose([
				Resize((110, 110)),
				RandomCrop((96,96))
				]))
	dataloader_f = DataLoader(image_data_f, batch_size=args.batch_size, shuffle=False, pin_memory=True)

	image_data_p = FaceIdPoseDataset(images_p, id_labels_p,
			transforms=transforms.Compose([
				Resize((110, 110)),
				RandomCrop((96,96))
				]))
	dataloader_p = DataLoader(image_data_p, batch_size=args.batch_size, shuffle=False, pin_memory=True)

	#prepare model
	G_f = single_model.Generator(channel_num)
	G_p = single_model.Generator(channel_num)

	# torch.load on fully pickled models returns the modules themselves,
	# replacing the two instances constructed above.
	G_f = torch.load(model_G_path_f)
	G_p = torch.load(model_G_path_p)


	data_num_f = len(images_f)
	data_num_p = len(images_p)
	feat_dim = 320
	id_label = 1

	if args.cuda:
		G_f = G_f.cuda()
		G_p = G_p.cuda()

	G_f.eval()
	G_p.eval()
	#D.eval()


	with open(front_feat_dir, 'wb') as bin_f:
		bin_f.write(st.pack('iii', data_num_f, feat_dim, id_label))
		for i, batch_data in enumerate(dataloader_f):
			batch_image = torch.FloatTensor(batch_data[0].float())
			batch_id_label = batch_data[1]

			if args.cuda:
				batch_image = batch_image.cuda()

			batch_image = Variable(batch_image)
			generated = G_f(batch_image)
			out_feat = G_f.features.cpu().data.numpy() #get feature vectors
			print(batch_id_label)
			feat_num = G_f.features.size(0)

			for j in range(feat_num):
				bin_f.write(st.pack('f'*feat_dim + 'i', *tuple(out_feat[j, :]), batch_id_label[j]))

	with open(profile_feat_dir, 'wb') as bin_f:
		bin_f.write(st.pack('iii', data_num_p, feat_dim, id_label))
		for i, batch_data in enumerate(dataloader_p):
			batch_image = torch.FloatTensor(batch_data[0].float())
			batch_id_label = batch_data[1]

			if args.cuda:
				batch_image = batch_image.cuda()

			batch_image = Variable(batch_image)
			generated = G_p(batch_image)
			out_feat = G_p.features.cpu().data.numpy() #get feature vectors
			print(batch_id_label)
			feat_num = G_p.features.size(0)

			for j in range(feat_num):
				bin_f.write(st.pack('f'*feat_dim + 'i', *tuple(out_feat[j, :]), batch_id_label[j]))
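
The pack calls above define the .bin layout: an 'iii' header (record count, feature dimension, label flag) followed by one record of feat_dim floats plus an int label per image. A matching reader sketch (Example #9's load_feat presumably does something similar):

import struct as st

def load_feat(path):
    with open(path, 'rb') as f:
        num, feat_dim, _ = st.unpack('iii', f.read(12))
        row = st.Struct('f' * feat_dim + 'i')
        feats, labels = [], []
        for _ in range(num):
            *vec, label = row.unpack(f.read(row.size))
            feats.append(vec)
            labels.append(label)
    return feats, labels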
Example #8
def main():
    """Create a TensorRT engine for ONNX-based YOLOv3-608 and run inference."""

    # Try to load a previously generated YOLOv3-608 network graph in ONNX format:
    onnx_file_path = './yolov3.onnx'
    engine_file_path = "yolov3.trt"
    data_path = "./data/unrel.data"

    data = parse_data_cfg(data_path)
    nc = int(data['classes'])  # number of classes
    path = data['valid']  # path to test images
    names = load_classes(data['names'])  # class names

    iouv = torch.linspace(0.5, 0.95, 1,
                          dtype=torch.float32)  # iou vector for mAP@0.5:0.95 (1 step -> [0.5] only)
    niou = 1

    conf_thres = 0.001
    iou_thres = 0.6
    verbose = True

    # Generate custom dataloader
    img_size = 448  # copied from the PyTorch source
    batch_size = 16

    dataset = LoadImagesAndLabels(path, img_size, batch_size, rect=True)
    batch_size = min(batch_size, len(dataset))
    dataloader = data_loader(dataset, batch_size, img_size)

    # Output shapes expected by the post-processor
    output_shapes = [(16, 126, 14, 14), (16, 126, 28, 28), (16, 126, 56, 56)]

    # Do inference with TensorRT
    trt_outputs = []
    with get_engine(onnx_file_path, engine_file_path
                    ) as engine, engine.create_execution_context() as context:
        inputs, outputs, bindings, stream = common.allocate_buffers(engine)
        s = ('%20s' + '%10s' * 6) % ('Class', 'Images', 'Targets', 'P', 'R',
                                     'mAP@0.5', 'F1')
        p, r, f1, mp, mr, map, mf1, t0, t1 = 0., 0., 0., 0., 0., 0., 0., 0., 0.
        pbar = tqdm.tqdm(dataloader, desc=s)
        stats, ap, ap_class = [], [], []
        seen = 0

        for batch_i, (imgs, targets, paths, shapes) in enumerate(pbar):

            imgs = imgs.astype(np.float32) / 255.0
            nb, _, height, width = imgs.shape  # batch size, channels, height, width
            whwh = np.array([width, height, width, height])

            inputs[0].host = imgs

            postprocessor_args = {
                # A list of 3 three-dimensional tuples for the YOLO masks
                "yolo_masks": [(6, 7, 8), (3, 4, 5), (0, 1, 2)],
                # A list of 9 two-dimensional tuples for the YOLO anchors
                "yolo_anchors": [(10, 13), (16, 30), (33, 23),
                                 (30, 61), (62, 45), (59, 119),
                                 (116, 90), (156, 198), (373, 326)],
                "num_classes": 37,
                "stride": [32, 16, 8]
            }

            postprocessor = PostprocessYOLO(**postprocessor_args)

            # Do layers before yolo
            t = time.time()
            trt_outputs = common.do_inference_v2(context,
                                                 bindings=bindings,
                                                 inputs=inputs,
                                                 outputs=outputs,
                                                 stream=stream)

            trt_outputs = [
                output.reshape(shape)
                for output, shape in zip(trt_outputs, output_shapes)
            ]

            trt_outputs = [
                np.ascontiguousarray(
                    otpt[:, :, :int(imgs.shape[2] * (2**i) /
                                    32), :int(imgs.shape[3] * (2**i) / 32)],
                    dtype=np.float32) for i, otpt in enumerate(trt_outputs)
            ]

            output_list = postprocessor.process(trt_outputs)

            t0 += time.time() - t

            inf_out = torch.cat(output_list, 1)
            t = time.time()
            output = non_max_suppression(inf_out,
                                         conf_thres=conf_thres,
                                         iou_thres=iou_thres)  # nms
            t1 += time.time() - t

            # Statistics per image
            for si, pred in enumerate(output):
                labels = targets[targets[:, 0] == si, 1:]
                nl = len(labels)
                tcls = labels[:, 0].tolist() if nl else []  # target class
                seen += 1

                if pred is None:
                    if nl:
                        stats.append((torch.zeros(0, niou, dtype=torch.bool),
                                      torch.Tensor(), torch.Tensor(), tcls))
                    continue

                # Assign all predictions as incorrect
                correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool)
                if nl:
                    detected = []  # target indices
                    tcls_tensor = labels[:, 0]

                    # target boxes
                    tbox = xywh2xyxy(labels[:, 1:5]) * whwh
                    tbox = tbox.type(torch.float32)

                    # Per target class
                    for cls in torch.unique(tcls_tensor):
                        ti = (cls == tcls_tensor).nonzero().view(-1)  # target indices
                        pi = (cls == pred[:, 5]).nonzero().view(-1)  # prediction indices

                        # Search for detections
                        if pi.shape[0]:
                            # Prediction to target ious
                            ious, i = box_iou(pred[pi, :4], tbox[ti]).max(
                                1)  # best ious, indices

                            # Append detections
                            for j in (ious > iouv[0]).nonzero():
                                d = ti[i[j]]  # detected target
                                if d not in detected:
                                    detected.append(d)
                                    correct[pi[j]] = ious[j] > iouv  # iouv is 1xn
                                    if len(detected) == nl:  # all targets already located in image
                                        break

                # Append statistics (correct, conf, pcls, tcls)
                stats.append(
                    (correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls))

            # Plot images
            if batch_i < 1:
                f = 'test_batch%g_gt.jpg' % batch_i  # filename
                plot_images(imgs, targets, paths=paths, names=names,
                            fname=f)  # ground truth
                f = 'test_batch%g_pred.jpg' % batch_i
                plot_images(imgs,
                            output_to_target(output, width, height),
                            paths=paths,
                            names=names,
                            fname=f)  # predictions

        # Compute statistics
        stats = [np.concatenate(x, 0) for x in zip(*stats)]  # to numpy
        if len(stats):
            p, r, ap, f1, ap_class = ap_per_class(*stats)
            if niou > 1:
                p, r, ap, f1 = p[:, 0], r[:, 0], ap.mean(1), ap[:, 0]  # [P, R, mAP@0.5:0.95, mAP@0.5]
            mp, mr, map, mf1 = p.mean(), r.mean(), ap.mean(), f1.mean()
            nt = np.bincount(stats[3].astype(np.int64),
                             minlength=nc)  # number of targets per class
        else:
            nt = torch.zeros(1)

        # Print results
        pf = '%20s' + '%10.3g' * 6  # print format
        print(pf % ('all', seen, nt.sum(), mp, mr, map, mf1))

        # Print results per class
        if verbose and nc > 1 and len(stats):
            for i, c in enumerate(ap_class):
                print(pf % (names[c], seen, nt[c], p[i], r[i], ap[i], f1[i]))

        # Print speeds
        if verbose:
            t = tuple(x / seen * 1E3 for x in (t0, t1, t0 + t1)) + (
                img_size, img_size, batch_size)  # tuple
            print(
                'Speed: %.1f/%.1f/%.1f ms inference/NMS/total per %gx%g image at batch-size %g'
                % t)
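
The hard-coded output_shapes follow from the batch size, the per-scale channel count 3 * (num_classes + 5) = 126 for the 37 classes configured above, and the grid sizes img_size // stride. A sketch that derives them instead of hard-coding:

    num_classes = 37
    channels = 3 * (num_classes + 5)  # 3 anchors per scale; 5 = (x, y, w, h, objectness)
    output_shapes = [(batch_size, channels, img_size // s, img_size // s)
                     for s in (32, 16, 8)]
    # -> [(16, 126, 14, 14), (16, 126, 28, 28), (16, 126, 56, 56)]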
Example #9
def main():
    args = parse_args()

    if args.model_type in ('Model_F', 'Model_P'):
        args.save_dir = os.path.join(
            args.save_dir, args.model_type,
            datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))
    os.makedirs(args.save_dir)

    print("Parameters:")
    for attr, value in sorted(args.__dict__.items()):
        text = "\t{}={}\n".format(attr.upper(), value)
        print(text)
        with open('{}/Parameters.txt'.format(args.save_dir), 'a') as f:
            f.write(text)

    if args.model_type == 'Model_F':
        # Training for Model_F is not implemented in this snippet
        print("Do nothing")
    else:
        #Train model_P
        #load front feature from model_F
        front_feat_file = './dataset/cfp-dataset/front_feat.bin'
        profile_feat_file = './dataset/cfp-dataset/profile_feat.bin'
        protocol_dir = './dataset/cfp-dataset/Protocol/Split'
        pair_type = 'FP'
        split = args.split_id

        final_front_pair, final_front_id = load_feat(front_feat_file)

        #load image
        images_f, id_labels_f, Nd, channel_num = data_loader(
            args.data_place, 'Front')
        images_p, id_labels_p, Nd, channel_num = data_loader(
            args.data_place, 'Profile')

        #make pair data
        final_images_p, final_images_f, id_labels, final_front_feat = make_pair_data(
            images_p, images_f, id_labels_p, final_front_pair, protocol_dir,
            pair_type, split)

        #initialize model_P
        Nz = 50
        if not args.load_model:
            model_F_D_path = 'C:\\Users\\duyson\\Desktop\\Projects\\FaceNormalize\\PytorchGAN\\snapshot\\Front\\2018-05-10_16-51-09\\epoch1000_D.pt'
            model_F_G_path = 'C:\\Users\\duyson\\Desktop\\Projects\\FaceNormalize\\PytorchGAN\\snapshot\\Front\\2018-05-10_16-51-09\\epoch1000_G.pt'

            D_F = Model_F.Discriminator(Nd, channel_num)
            G_F = Model_F.Generator(channel_num)
            D_P = Model_P.Discriminator(Nd, channel_num)
            G_P = Model_P.Generator(Nz, channel_num)

            D_F = torch.load(model_F_D_path)
            G_F = torch.load(model_F_G_path)
            # Note: assigning modules shares their parameters between the two
            # networks rather than copying them.
            D_P.convLayers = D_F.convLayers
            D_P.fc = D_F.fc
            G_P.G_dec_convLayers_frontal = G_F.G_dec_convLayers
            train_model_P(final_images_p, final_images_f, final_front_feat,
                          id_labels, Nd, Nz, D_P, G_P, args)
        else:
            path_to_G = './snapshot/Model_P/initial_training/epoch1000_G.pt'
            path_to_D = './snapshot/Model_P/initial_training/epoch1000_D.pt'

            D_P = Model_P.Discriminator(Nd, channel_num)
            G_P = Model_P.Generator(Nz, channel_num)

            D_P = torch.load(path_to_D)
            G_P = torch.load(path_to_G)
            train_model_P(final_images_p, final_images_f, final_front_feat,
                          id_labels, Nd, Nz, D_P, G_P, args)
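
Assigning D_F's submodules to D_P above ties the two networks to the same parameter tensors, so updates to one affect the other. If independent copies were wanted instead, a deep copy avoids the sharing (a sketch; module names as above):

import copy

D_P.convLayers = copy.deepcopy(D_F.convLayers)
D_P.fc = copy.deepcopy(D_F.fc)
G_P.G_dec_convLayers_frontal = copy.deepcopy(G_F.G_dec_convLayers)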