Example 1

import os

import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn

# `parser`, `GRM`, `get_test_loader`, and `validate` are project-specific
# and assumed to be imported elsewhere in the repository.
def main():
    global args, best_prec1
    args = parser.parse_args()
    print(args)

    # Create dataloader.
    print('====> Creating dataloader...')
    test_loader = get_test_loader(args)

    # Load GRM network.
    print('====> Loading the network...')
    model = GRM(num_classes=args.num_classes,
                adjacency_matrix=args.adjacency_matrix)
    # print model

    # Load the fine-tuned weights of the network.
    if args.weights:
        if os.path.isfile(args.weights):
            print("====> loading model '{}'".format(args.weights))
            checkpoint = torch.load(args.weights)
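            # Strip the 'module.' prefix that nn.DataParallel prepends to
            # parameter names so the weights load into the unwrapped model.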
            checkpoint_dict = {
                k.replace('module.', ''): v
                for k, v in checkpoint['state_dict'].items()
            }
            model.load_state_dict(checkpoint_dict)
        else:
            print("====> no pretrain model at '{}'".format(args.weights))

    # Wrap the heavy sub-networks in DataParallel for multi-GPU execution.
    model.fg = torch.nn.DataParallel(model.fg)
    model.full_im_net = torch.nn.DataParallel(model.full_im_net)
    model.cuda()

    criterion = nn.CrossEntropyLoss().cuda()

    # Let cuDNN benchmark convolution algorithms; fastest when input shapes
    # stay fixed across batches.
    cudnn.benchmark = True

    fnames = []
    if args.write_out:
        print('------ Write out result ------')
        for i in range(args.num_classes):
            fnames.append(open(args.result_path + str(i) + '.txt', 'w'))

    validate(test_loader, model, criterion, fnames)

    if args.write_out:
        for i in range(args.num_classes):
            fnames[i].close()
    return
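
Each snippet references a module-level `parser` that the listing omits. A minimal sketch, assuming it is built with argparse: the flag names are inferred from the `args.` attributes used in Example 1, and the defaults are illustrative guesses rather than the repository's real values.

import argparse

# Hypothetical reconstruction of the module-level parser used in Example 1.
parser = argparse.ArgumentParser(description='GRM testing')
parser.add_argument('--weights', default='', type=str,
                    help='path to the fine-tuned model checkpoint')
parser.add_argument('--num_classes', default=80, type=int,
                    help='number of target classes')
parser.add_argument('--adjacency_matrix', default='', type=str,
                    help='path to the graph adjacency matrix file')
parser.add_argument('--write_out', action='store_true',
                    help='write per-class scores to text files')
parser.add_argument('--result_path', default='results/', type=str,
                    help='prefix/directory for the per-class output files')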

Example 2

# Imports as in Example 1, plus `numpy as np` and a TensorBoard
# `SummaryWriter` (assumed to come from tensorboardX or
# torch.utils.tensorboard). The project helpers (Inception_a,
# get_train_loader, Checkpoint, train_eval, validate_eval) are again
# assumed to be imported elsewhere in the repository.
def main():
    # global args, best_prec1
    args = parser.parse_args()
    print('\n====> Input Arguments')
    print(args)
    # Tensorboard writer.
    global writer
    writer = SummaryWriter(log_dir=args.result_path)

    # Create dataloader.
    print('\n====> Creating dataloader...')
    train_loader = get_train_loader(args)
    test_loader = get_test_loader(args)

    # Load the Inception_a network.
    print('====> Loading the network...')
    model = Inception_a(num_class=args.num_class,
                        num_frame=args.num_frame,
                        pretrained=True)

    # Load single-frame pretrained weights.
    pretrain_model = torch.load('models/pretrain_inc_sf.pth')
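    # Copy the pretrained tensors positionally: this assumes both state dicts
    # enumerate parameters in the same order, and breaks if the architectures
    # diverge.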
    keys = list(model.state_dict().keys())  # list() so keys are indexable in Python 3
    new_state_dict = {}
    for i, k in enumerate(pretrain_model.keys()):
        new_state_dict[keys[i]] = pretrain_model[k]
    model.load_state_dict(new_state_dict)
    """Load checkpoint and weight of network.
	"""
    global cp_recorder
    if args.checkpoint_dir:
        cp_recorder = Checkpoint(args.checkpoint_dir, args.checkpoint_name)
        cp_recorder.load_checkpoint(model)

    model = nn.DataParallel(model)
    model.cuda()
    criterion = nn.CrossEntropyLoss().cuda()
    cudnn.benchmark = True

    # optimizer = torch.optim.SGD(model.module.classifier.parameters(), lr=args.lr, weight_decay=args.wd, momentum=args.momentum)
    optimizer = torch.optim.SGD(model.parameters(),
                                lr=args.lr,
                                weight_decay=args.wd,
                                momentum=args.momentum)

    # Train the Inception_a model.
    print('====> Training...')
    for epoch in range(cp_recorder.contextual['b_epoch'], args.epoch):
        _, _, prec_tri, rec_tri, ap_tri = train_eval(train_loader, test_loader,
                                                     model, criterion,
                                                     optimizer, args, epoch)
        top1_avg_val, loss_avg_val, prec_val, rec_val, ap_val = validate_eval(
            test_loader, model, criterion, args, epoch)

        # Print result.
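        # Per-class APs can be NaN for classes absent from the split;
        # np.nan_to_num zeroes them before the mean, giving the mAP logged here.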
        writer.add_scalars('mAP (per epoch)',
                           {'train': np.nan_to_num(ap_tri).mean()}, epoch)
        writer.add_scalars('mAP (per epoch)',
                           {'valid': np.nan_to_num(ap_val).mean()}, epoch)

        print_score = False
        if print_score:
            print('\n====> Scores')
            print(
                '[Epoch {0}]:\n'
                '  Train:\n'
                '    Prec@1 {1}\n'
                '    Recall {2}\n'
                '    AP {3}\n'
                '    mAP {4:.3f}\n'
                '  Valid:\n'
                '    Prec@1 {5}\n'
                '    Recall {6}\n'
                '    AP {7}\n'
                '    mAP {8:.3f}\n'.format(epoch, prec_tri, rec_tri, ap_tri,
                                           np.nan_to_num(ap_tri).mean(),
                                           prec_val, rec_val, ap_val,
                                           np.nan_to_num(ap_val).mean()))

        # Record.
        writer.add_scalars('Loss (per batch)', {'valid': loss_avg_val},
                           (epoch + 1) * len(train_loader))
        writer.add_scalars('Prec@1 (per batch)', {'valid': top1_avg_val},
                           (epoch + 1) * len(train_loader))
        writer.add_scalars('mAP (per batch)',
                           {'valid': np.nan_to_num(ap_val).mean()},
                           (epoch + 1) * len(train_loader))

        # Save checkpoint.
        cp_recorder.record_contextual({
            'b_epoch': epoch + 1,
            'b_batch': -1,
            'prec': top1_avg_val,
            'loss': loss_avg_val,
            'class_prec': prec_val,
            'class_recall': rec_val,
            'class_ap': ap_val,
            'mAP': np.nan_to_num(ap_val).mean()
        })
        cp_recorder.save_checkpoint(model)

Example 3

# Imports and project helpers as in Example 2; this variant trains the
# First_Glance model.
def main():
	# global args, best_prec1
	args = parser.parse_args()
	print('\n====> Input Arguments')
	print(args)
	# Tensorboard writer.
	global writer
	writer = SummaryWriter(log_dir=args.result_path)


	# Create dataloader.
	print('\n====> Creating dataloader...')
	train_loader = get_train_loader(args)
	test_loader = get_test_loader(args)

	# Load First Glance network.
	print('====> Loading the network...')
	model = First_Glance(num_classes=args.num_classes, pretrained=True)

	"""Load checkpoint and weight of network.
	"""
	global cp_recorder
	if args.checkpoint_dir:
		cp_recorder = Checkpoint(args.checkpoint_dir, args.checkpoint_name)
		cp_recorder.load_checkpoint(model)
	

	model.cuda()
	criterion = nn.CrossEntropyLoss().cuda()
	cudnn.benchmark = True
	optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, weight_decay=args.wd, momentum=args.momentum)
			
	# Train first-glance model.
	print('====> Training...')
	for epoch in range(cp_recorder.contextual['b_epoch'], args.epoch):
		_, _, prec_tri, rec_tri, ap_tri = train_eval(train_loader, test_loader, model, criterion, optimizer, args, epoch)
		top1_avg_val, loss_avg_val, prec_val, rec_val, ap_val = validate_eval(test_loader, model, criterion, args, epoch)

		# Print result.
		writer.add_scalars('mAP (per epoch)', {'train': np.nan_to_num(ap_tri).mean()}, epoch)
		writer.add_scalars('mAP (per epoch)', {'valid': np.nan_to_num(ap_val).mean()}, epoch)
		print('\n====> Scores')
		print('[Epoch {0}]:\n'
			'  Train:\n'
			'    Prec@1 {1}\n'
			'    Recall {2}\n'
			'    AP {3}\n'
			'    mAP {4:.3f}\n'
			'  Valid:\n'
			'    Prec@1 {5}\n'
			'    Recall {6}\n'
			'    AP {7}\n'
			'    mAP {8:.3f}\n'.format(epoch, 
				prec_tri, rec_tri, ap_tri, np.nan_to_num(ap_tri).mean(),
				prec_val, rec_val, ap_val, np.nan_to_num(ap_val).mean()))
		

		# Record.
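		# (epoch+1)*len(train_loader) maps the epoch boundary to a global batch
		# index, so validation points align with the per-batch training curves.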
		writer.add_scalars('Loss (per batch)', {'valid': loss_avg_val}, (epoch+1)*len(train_loader))
		writer.add_scalars('Prec@1 (per batch)', {'valid': top1_avg_val}, (epoch+1)*len(train_loader))
		writer.add_scalars('mAP (per batch)', {'valid': np.nan_to_num(ap_val).mean()}, (epoch+1)*len(train_loader))

		# Save checkpoint.
		cp_recorder.record_contextual({'b_epoch': epoch+1, 'b_batch': -1, 'prec': top1_avg_val, 'loss': loss_avg_val, 
			'class_prec': prec_val, 'class_recall': rec_val, 'class_ap': ap_val, 'mAP': np.nan_to_num(ap_val).mean()})
		cp_recorder.save_checkpoint(model)
Example 4

# Imports and project helpers as in Example 2; this variant trains the full
# GRM model, whose `fg` (First Glance) and `ggnn` branches appear below.
def main():
    # global args, best_prec1
    args = parser.parse_args()
    print('\n====> Input Arguments')
    print(args)
    # Tensorboard writer.
    global writer
    writer = SummaryWriter(log_dir=args.result_path)

    # Create dataloader.
    print('\n====> Creating dataloader...')
    train_loader = get_train_loader(args)
    test_loader = get_test_loader(args)

    # Load GRM network.
    print('====> Loading the GRM network...')
    model = GRM(num_classes=args.num_classes,
                adjacency_matrix=args.adjacency_matrix)

    # Load First-Glance network.
    print('====> Loading the fine-tuned First Glance model...')
    if args.fg_finetune and os.path.isfile(args.fg_finetune):
        model.fg.load_state_dict(torch.load(args.fg_finetune))
    else:
        print("No find '{}'".format(args.fg_finetune))

    # Load checkpoint and weights of the network.
    global cp_recorder
    if args.checkpoint_dir:
        cp_recorder = Checkpoint(args.checkpoint_dir, args.checkpoint_name)
        cp_recorder.load_checkpoint(model)

    model.fg = torch.nn.DataParallel(model.fg)
    model.full_im_net = torch.nn.DataParallel(model.full_im_net)
    model.cuda()
    criterion = nn.CrossEntropyLoss().cuda()
    cudnn.benchmark = True
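    # Two optimizers: plain SGD for the classifier head, and Adam with a
    # small fixed learning rate for the GGNN branch.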
    optimizer_cls = torch.optim.SGD(model.classifier.parameters(),
                                    lr=args.lr,
                                    weight_decay=args.wd,
                                    momentum=args.momentum)
    optimizer_ggnn = torch.optim.Adam(model.ggnn.parameters(),
                                      lr=0.00001,
                                      weight_decay=args.wd)

    # Train GRM model.
    print('====> Training...')
    for epoch in range(cp_recorder.contextual['b_epoch'], args.epoch):
        _, _, prec_tri, rec_tri, ap_tri = train_eval(train_loader, test_loader,
                                                     model, criterion,
                                                     optimizer_cls,
                                                     optimizer_ggnn, args,
                                                     epoch)
        top1_avg_val, loss_avg_val, prec_val, rec_val, ap_val = validate_eval(
            test_loader, model, criterion, args, epoch)

        # Print result.
        writer.add_scalars('mAP (per epoch)',
                           {'train': np.nan_to_num(ap_tri).mean()}, epoch)
        writer.add_scalars('mAP (per epoch)',
                           {'valid': np.nan_to_num(ap_val).mean()}, epoch)
        print('\n====> Scores')
        print(
            '[Epoch {0}]:\n'
            '  Train:\n'
            '    Prec@1 {1}\n'
            '    Recall {2}\n'
            '    AP {3}\n'
            '    mAP {4:.3f}\n'
            '  Valid:\n'
            '    Prec@1 {5}\n'
            '    Recall {6}\n'
            '    AP {7}\n'
            '    mAP {8:.3f}\n'.format(epoch, prec_tri, rec_tri, ap_tri,
                                       np.nan_to_num(ap_tri).mean(), prec_val,
                                       rec_val, ap_val,
                                       np.nan_to_num(ap_val).mean()))

        # Record.
        writer.add_scalars('Loss (per batch)', {'valid': loss_avg_val},
                           (epoch + 1) * len(train_loader))
        writer.add_scalars('Prec@1 (per batch)', {'valid': top1_avg_val},
                           (epoch + 1) * len(train_loader))
        writer.add_scalars('mAP (per batch)',
                           {'valid': np.nan_to_num(ap_val).mean()},
                           (epoch + 1) * len(train_loader))

        # Save checkpoint.
        cp_recorder.record_contextual({
            'b_epoch': epoch + 1,
            'b_batch': -1,
            'prec': top1_avg_val,
            'loss': loss_avg_val,
            'class_prec': prec_val,
            'class_recall': rec_val,
            'class_ap': ap_val,
            'mAP': np.nan_to_num(ap_val).mean()
        })
        cp_recorder.save_checkpoint(model)
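
The training scripts (Examples 2-4) read additional optimizer and checkpoint options from the same parser. Again a hedged sketch: the flag names come from the `args.` attributes used above, while every default is a placeholder.

import argparse

# Hypothetical training-side flags, inferred from Examples 2-4.
parser = argparse.ArgumentParser(description='GRM training')
parser.add_argument('--lr', default=0.001, type=float,
                    help='initial learning rate for SGD')
parser.add_argument('--wd', default=1e-4, type=float,
                    help='weight decay')
parser.add_argument('--momentum', default=0.9, type=float,
                    help='SGD momentum')
parser.add_argument('--epoch', default=30, type=int,
                    help='number of training epochs')
parser.add_argument('--checkpoint_dir', default='checkpoints/', type=str,
                    help='directory holding training checkpoints')
parser.add_argument('--checkpoint_name', default='model', type=str,
                    help='checkpoint file prefix')
parser.add_argument('--fg_finetune', default='', type=str,
                    help='fine-tuned First-Glance weights (Example 4)')
parser.add_argument('--num_class', default=80, type=int,
                    help='number of classes (Example 2 reads num_class)')
parser.add_argument('--num_frame', default=16, type=int,
                    help='frames per clip (Example 2)')
parser.add_argument('--result_path', default='results/', type=str,
                    help='TensorBoard log directory')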