Example #1
def get_model(cfg, num_classes, device, logger):
    if 'decouple' in cfg.NAME:
        if cfg.TRAIN_STAGE == 1:
            model = Network1(cfg, mode="train", num_classes=num_classes)
        else:
            model = Network(cfg,
                            mode="train",
                            num_classes=int(sum(num_classes) / 100) * 100)
    else:
        if isinstance(num_classes, list) and not cfg.MULTI_BRANCH:
            model = Network1(cfg, mode="train", num_classes=num_classes)
        elif isinstance(num_classes, list) and cfg.MULTI_BRANCH:
            model = Network2(cfg, mode="train", num_classes=num_classes)
        else:
            model = Network(cfg, mode="train", num_classes=num_classes)

    if cfg.BACKBONE.FREEZE:
        model.freeze_backbone()
        logger.info("Backbone has been frozen")

    if cfg.CPU_MODE:
        model = model.to(device)
    else:
        model = torch.nn.DataParallel(model).cuda()

    return model
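`freeze_backbone()` is implemented inside `Network` and not shown here; a minimal sketch of what such a method typically does, assuming the model keeps its feature extractor in a `backbone` submodule (that attribute name is an assumption):

import torch.nn as nn

def freeze_backbone(backbone: nn.Module) -> None:
    # Sketch: stop gradients from flowing into the backbone parameters.
    for param in backbone.parameters():
        param.requires_grad = False
    # Also freeze BatchNorm running statistics, a common companion step.
    backbone.eval()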
Example #2
def main():
    read_f = file("./data/train_data", "rb")
    train_generator = cPickle.load(read_f)
    read_f.close()
    read_f = file("./data/emb", "rb")
    embedding_matrix, _, _ = cPickle.load(read_f)
    read_f.close()
    test_generator = DataGenerator("test", args.batch_size)

    model = Network(args.embedding_size, args.embedding_dimension, embedding_matrix, args.hidden_dimension).cuda()
    best_model = Network(args.embedding_size, args.embedding_dimension, embedding_matrix, args.hidden_dimension).cuda()
    optimizer = optim.Adam(model.parameters(), lr=args.learning_rate, weight_decay=args.l2_reg)

    best_result = 0.0
    for epoch in range(args.epoch_num):
        info = "[" + epoch * ">" + " " * (args.epoch_num - epoch) + "]"
        sys.stderr.write(info + "\r")
        cost1, cost2, cost, total_num = 0.0, 0.0, 0.0, 0
        for data in train_generator.generate_data(shuffle=True):
            zp_rep, npc_rep, np_rep, feature = model.forward(data, dropout=args.dropout)
            output = model.generate_score(zp_rep, npc_rep, np_rep, feature)
            optimizer.zero_grad()
            dis1 = output[data['wid']] - output[data['cid']] + args.margin
            dis2 = output[data['uwid']] - args.wrong_bound
            dis3 = args.correct_bound - output[data['ucid']]
            triplet_loss = torch.sum(dis1 * (dis1 > 0).cuda().float()) + torch.sum(
                dis2 * (dis2 > 0).cuda().float()) + torch.sum(dis3 * (dis3 > 0).cuda().float())

            cos_sim_sum = torch.sum(1 - F.cosine_similarity(np_rep[data['cid1']], np_rep[data['cid2']]))
            sim_w = 0.5

            num = data["result"].shape[0]

            total_loss = triplet_loss + sim_w * cos_sim_sum
            total_loss.backward()

            cost += total_loss.item() * num
            cost1 += triplet_loss.item() * num
            cost2 += cos_sim_sum.item() * num
            total_num += num
            optimizer.step()
        train_re = evaluate_train(train_generator, model)
        dev_re, dev_cost = evaluate_dev(train_generator, model, args.margin)
        if dev_re > best_result:
            best_result = dev_re
            net_copy(best_model, model)
        test_re = evaluate_test(test_generator, model)
        print('Epoch %s; Train Cost: %.4f, %.4f, %.4f; Train Result: %.4f; Dev Result: %.4f, %.4f; Test Result: %.4f' % (
            epoch, cost / total_num, cost1 / total_num, cost2 / total_num, train_re, dev_re, dev_cost, test_re))
    print(file=sys.stderr)
    torch.save(best_model, "./models/model")
    re = evaluate_test(test_generator, best_model)
    print "Performance on Test: F", re
Example #3
def validate(args):
    torch.cuda.set_device(0)

    test_set = Dataset(args.test_path, args.u_mask_path, args.s_mask_up_path, args.s_mask_down_path, args.test_sample_rate)
    test_loader = DataLoader(dataset=test_set, batch_size=args.batch_size, shuffle=False, pin_memory=True)
    model = Network(num_layers=args.num_layers, rank=0)
    # load checkpoint
    model_path = os.path.join(args.model_save_path, 'best_checkpoint.pth.tar')
    assert os.path.isfile(model_path)
    checkpoint = torch.load(model_path, map_location='cuda:{}'.format(0))
    model.load_state_dict(checkpoint['model'])
    print('The model is loaded.')
    model = model.up_network.cuda(0)

    print('Now testing {}.'.format(args.exp_name))
    model.eval()
    with torch.no_grad():
        average_psnr, average_ssim, average_psnr_zerof, average_ssim_zerof, average_time, total_num = 0.0, 0.0, 0.0, 0.0, 0.0, 0
        t = tqdm(test_loader, desc='testing', total=int(len(test_loader)))
        for iter_num, data_batch in enumerate(t):
            label = data_batch[0].to(0, non_blocking=True)
            mask_under = data_batch[1].to(0, non_blocking=True)
            # fname = data_batch[4]
            # slice_id = data_batch[5]
            under_img = rAtA(label, mask_under)
            # inference
            start_time = time.time()
            output, _ = model(under_img.permute(0, 3, 1, 2).contiguous(), mask_under)
            infer_time = time.time() - start_time
            average_time += infer_time
            output = output.permute(0, 2, 3, 1).contiguous()
            # calculate and print test information
            under_img, output, label = under_img.detach().cpu().numpy(), output.detach().cpu().numpy(), label.float().detach().cpu().numpy()
            total_num += under_img.shape[0]
            batch_psnr, batch_ssim, batch_psnr_zerof, batch_ssim_zerof = 0.0, 0.0, 0.0, 0.0
            for i in range(under_img.shape[0]):
                under_slice, output_slice, label_slice = under_img[i].squeeze(), output[i].squeeze(), label[i].squeeze()
                psnr = peak_signal_noise_ratio(label_slice, output_slice, data_range=label_slice.max())
                psnr_zerof = peak_signal_noise_ratio(label_slice, under_slice, data_range=label_slice.max())
                ssim = structural_similarity(label_slice, output_slice, data_range=label_slice.max())
                ssim_zerof = structural_similarity(label_slice, under_slice, data_range=label_slice.max())
                batch_psnr += psnr
                batch_ssim += ssim
                batch_psnr_zerof += psnr_zerof
                batch_ssim_zerof += ssim_zerof
            average_psnr += batch_psnr
            average_ssim += batch_ssim
            average_psnr_zerof += batch_psnr_zerof
            average_ssim_zerof += batch_ssim_zerof
        average_psnr /= total_num
        average_ssim /= total_num
        average_psnr_zerof /= total_num
        average_ssim_zerof /= total_num
        average_time /= total_num
    print('average_time:{:.5f}s\tzerof_psnr:{:.5f}\tzerof_ssim:{:.5f}\ttest_psnr:{:.5f}\ttest_ssim:{:.5f}'.format(
        average_time, average_psnr_zerof, average_ssim_zerof, average_psnr, average_ssim))
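For reference, the `peak_signal_noise_ratio` calls above follow the standard definition; a minimal NumPy sketch of the same computation under the `data_range` convention used in this example:

import numpy as np

def psnr(reference, estimate, data_range):
    # PSNR = 10 * log10(data_range^2 / MSE), matching
    # skimage.metrics.peak_signal_noise_ratio.
    mse = np.mean((reference.astype(np.float64) - estimate.astype(np.float64)) ** 2)
    return 10.0 * np.log10(data_range ** 2 / mse)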
Example #4
def train_model():
    """
	This function will train model
	Tips: Load test,validation data first
	Then, seperately load training data, since training data is really huge.
	:return:
	"""
    path = '/home/jht00622/wiki_new.pkl'
    data = load_data(path)
    ## extract different type data
    train_dataset = data['train_dataset'] / 255
    train_age_labels = data['train_age_labels']
    #train_gender_labels = data['train_gender_labels']

    valid_dataset = data['valid_dataset'] / 255
    valid_age_labels = data['valid_age_labels']
    #valid_gender_labels = data['valid_gender_labels']

    test_dataset = data['test_dataset'] / 255
    test_age_labels = data['test_age_labels']
    #test_gender_labels = data['test_gender_labels']

    height = 128
    channel = 1
    batch_size = 50
    learn_rate = 0.001
    n_output = 4
    total_size = train_dataset.shape[0]
    net = Network(n_output=n_output,
                  n_length=height,
                  learning_rate=learn_rate,
                  batch_size=batch_size,
                  channel=channel,
                  output_graph=False,
                  use_ckpt=False)
    num_steps = 50000
    for i in range(num_steps):
        # randomly sample batch memory from all memory
        indices = np.random.permutation(total_size)[:batch_size]
        batch_x = train_dataset[indices, :, :, :]
        batch_y = train_age_labels[indices, :]
        net.learn(batch_x, batch_y)
        if i % 20 == 0:
            cost, accu_rate = net.get_accuracy_rate(batch_x, batch_y)
            print("Iteration: %i. Train loss %.5f, Minibatch accuracy:"
                  " %.1f%%" % (i, cost, accu_rate))
        if i % 100 == 0:
            cost, accu_rate = net.get_accuracy_rate(valid_dataset,
                                                    valid_age_labels)
            print("Iteration: %i. Validation loss %.5f, Validation accuracy:"
                  " %.1f%%" % (i, cost, accu_rate))
            cost, accu_rate = net.get_accuracy_rate(test_dataset,
                                                    test_age_labels)
            print("Iteration: %i. Test loss %.5f, Test accuracy:"
                  " %.1f%%" % (i, cost, accu_rate))
Example #5
    def __init__(self, files):
        super().__init__()
        self.network = Network()
        self.loader = DataLoader(files)
        self.subject_view = None
        self.num_trained = 0
        self.previous = []
        self.draw_mode = App.DRAW_NONE
        self.setGeometry(100, 100, 500, 500)
        self.path = None

        self.next()
Example #6
def get_model(cfg, num_classes, device, logger):
    model = Network(cfg, mode="train", num_classes=num_classes)

    if cfg.BACKBONE.FREEZE:
        model.freeze_backbone()
        logger.info("Backbone has been frozen")

    if cfg.CPU_MODE:
        model = model.to(device)
    else:
        model = torch.nn.DataParallel(model).cuda()

    return model
Example #7
def get_model(cfg, num_classes, device, logger):
    model = Network(cfg, mode="train", num_classes=num_classes)

    if cfg.BACKBONE.FREEZE:
        model.freeze_backbone()
        logger.info("Backbone has been frozen")

    if not cfg.DATASET.GENERATE_CAM_BASED_DATASET and cfg.TRAIN.DISTRIBUTED:
        if cfg.TRAIN.SYNCBN:
            model = apex.parallel.convert_syncbn_model(model)
    else:
        model = model.cuda()

    return model
Example #8
def standardExtract(generateNet=True):
    '''
    @brief Create a network wrapper that dumps dataset contents
    
    @param generateNet IFF TRUE, a network prototxt will be made and printed
    
    @returns Block(..) function that adds a data layer and dumping functions to a net
    '''
    def Block(net, datasetName, out_path='data'):
        '''
        @brief Add a data layer and file outputs to a network
        
        @param net Incomplete network definition
        @param datasetName Name of the desired dataset (see Dataset.py)
        @param out_path Output path for file dumps
        '''
        blobs = net.namedBlobs()
        ## Create data layer
        dataset = Dataset.get(name=datasetName, phase='TEST')
        img0, img1, flow_gt = dataset.flowLayer(net)

        if not os.path.isdir(out_path):
            os.makedirs(out_path)
        ## Write configuration file for viewer tool
        with open('%s/viewer.cfg' % out_path, 'w') as f:
            f.write('2 2\n')
            f.write('0 0 -img0.ppm\n')
            f.write('1 0 -img1.ppm\n')
            f.write('0 1 -gt.flo\n')
            f.write('1 1 none\n')

        ## Create network file outputs
        net.writeImage(img0, folder=out_path, prefix='', suffix='-img0')
        net.writeImage(img1, folder=out_path, prefix='', suffix='-img1')
        net.writeFlow(flow_gt, folder=out_path, prefix='', suffix='-gt')

    if generateNet:
        net = Network()

        dataset = param('dataset')
        if dataset is None:
            raise Exception('please specify dataset=...')
        dataset = str(dataset)

        Block(net, dataset)

        print(net.toProto())

    return Block
Example #9
def test():
    import torch.optim as optim
    import numpy as np

    net = Network(128)
    opt = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
    updater = Updater(net, opt)

    x = torch.Tensor([[0.4, 0.3], [0.5, 0.9]])
    t = torch.Tensor([[0.7, 0.1, 0.12], [1.4, -0.4, 0.45]])
    mc = np.array([2, 2, 1])
    sc = np.array([0, -1, 0])

    print(net(x).data.numpy() * mc + sc)

    for i in range(10000):
        loss = updater.step(x, t)
        if (i + 1) % 1000 == 0:
            print("iter {} : {}".format(i + 1, loss))

    print(net(x).data.numpy() * mc + sc)
Example #10
def main_train():
	env=gym.make('CartPole-v0').env
	net=Network(load_model='neural_net/net_2.meta', ckpt_location='neural_net', save_dest='neural_net/net_2')
	a=Agent(env, 'neural_net/net_2.meta', 'neural_net')

	NO_EPISODES=10000
	TIMESTEPS=300
	EXPLORATION_PROB=0.2
	DISCOUNT_FACTOR=0.9 #implement
	TRAIN_EVERY_N=5
	RENDER_EVERY_N=1
	VERBOSE=True
	MODIFIED_REWARD=True
	PENALTY=-10
	WRITE_EVERY_N=50
	NO_EPOCHS=2
	BATCH_SIZE=128	
	
	for ep in range(NO_EPISODES):
		prev_state=env.reset()
		for t in range(TIMESTEPS):
			if random.uniform(0, 1)>EXPLORATION_PROB:
				action=a.get_action(prev_state)
			else:
				action=a.get_random_action()
			new_state, reward, done, info = env.step(action)
			if ep%RENDER_EVERY_N==0:
				env.render()
			if done and MODIFIED_REWARD:
				reward=PENALTY
			data_point=[prev_state, action, reward, done, new_state]
			write_data(data_point, WRITE_EVERY_N)
			prev_state=new_state
			if done:
				if VERBOSE:
					print "episode:", ep, "score:", t
				break
		if ep%TRAIN_EVERY_N==0:
			net.train(n_epochs=1, print_every_n=200)
Example #11
def main():

    clock = pygame.time.Clock()
    run = True
    n = Network()
    startpos = read_pos(n.get_pos())
    print(startpos)
    p = Player(startpos[0], startpos[1], 100, 100, (0, 255, 0))
    p2 = Player(0, 0, 100, 100, (255, 0, 0))

    while run:
        clock.tick(60)
        p2pos = read_pos(n.send(make_pos((p.x, p.y))))
        print(p2pos)
        p2.x = p2pos[0]
        p2.y = p2pos[1]
        p2.update()

        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                run = False
        p.move()
        redraw(win, p, p2)
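`read_pos` and `make_pos` are defined outside this snippet; a plausible sketch, assuming positions cross the wire as a comma-separated "x,y" string (both helper bodies are assumptions):

def make_pos(tup):
    # Serialize an (x, y) tuple as "x,y" for Network.send().
    return str(tup[0]) + "," + str(tup[1])

def read_pos(s):
    # Parse "x,y" back into a tuple of ints.
    x, y = s.split(",")
    return int(x), int(y)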
Example #12
#Configure network to reconstruct two provided images (flower,Lincoln)

import numpy as np
import matplotlib
matplotlib.use('Agg')  # backend must be set before importing pyplot
import matplotlib.pyplot as plt

from utils import getData
from net import Network, train

#Flower
iterations = 1000

X,y = getData(2)
netF = Network(2,3)
netF,loss = train(netF,X,y,iterations,100)

preds = netF.predict(X)
plt.imshow(preds.reshape(133,140,3))
plt.title('Flower')
plt.savefig('Flower2.png')

plt.clf()
plt.plot(np.arange(iterations),loss)
plt.ylabel('Loss')
plt.xlabel('Iterations')
plt.title('Flower Loss')
plt.savefig('Flowerloss2.png')

#Lincoln
Example #13
def train_model():
	"""
	This function will train model
	Tips: Load test,validation data first
	Then, seperately load training data, since training data is really huge.
	:return:
	"""
	path = '/home/hengtong/project/age_gender/data/small/wiki_new.pkl'
	data = load_data(path)
	## extract different type data
	train_dataset = data['train_dataset']/255
	train_age_labels = data['train_age_labels']
	#train_gender_labels = data['train_gender_labels']

	valid_dataset = data['valid_dataset']/255
	valid_age_labels = data['valid_age_labels']
	#valid_gender_labels = data['valid_gender_labels']

	test_dataset = data['test_dataset']/255
	test_age_labels = data['test_age_labels']
	#test_gender_labels = data['test_gender_labels']

	height = 128
	channel = 1
	batch_size = 128
	learn_rate = 0.01
	n_output = 4 # age mode
	total_size = train_dataset.shape[0]
	net = Network(
		n_output = n_output,
		n_length=height,
		learning_rate=learn_rate,
		batch_size=batch_size,
		channel=channel,
		output_graph=False,
		use_ckpt=False
	)

	epoch = 400 # epoch
	iteration = int(total_size / batch_size)
	print(iteration)
	i = 1  # total training time
	accu_train_age = []
	accu_valid_age = []
	accu_test_age = []
	early_stop = 0  # early stopping flag
	train_rate_age = 0

	for e in range(epoch):
		print("-------------------------------")
		print("epoch %d" % (e + 1))
		# randomly sample batch memory from all memory
		indices = np.random.permutation(total_size)
		for ite in range(iteration):
			mini_indices = indices[ite * batch_size:(ite + 1) * batch_size]
			batch_x = train_dataset[mini_indices, :, :, :]
			batch_y_age = train_age_labels[mini_indices, :]
			net.learn(batch_x, batch_y_age)

			if i % 50 == 0:
				cost, train_rate_age = net.get_accuracy_rate(batch_x, batch_y_age)
				print("Iteration: %i. Train loss %.5f, Minibatch gen accuracy: %.1f%%" % (i, cost, train_rate_age))
				accu_train_age.append(train_rate_age)

				cost, valid_rate_age = net.get_accuracy_rate(valid_dataset, valid_age_labels)
				print("Iteration: %i. Validation loss %.5f, Validation gen accuracy: %.1f%%" % (i, cost, valid_rate_age))
				accu_valid_age.append(valid_rate_age)

				cost, test_rate_age = net.get_accuracy_rate(test_dataset, test_age_labels)
				print("Iteration: %i. Test loss %.5f, Test gen accuracy: %.1f%%" % (i, cost, test_rate_age))
				accu_test_age.append(test_rate_age)

			if i % 500 == 0:
				net.save_parameters()

			i = i + 1
		# early stopping
		if train_rate_age == 100:
			if early_stop == 10:
				print("Early Stopping!")
				break
			else:
				early_stop = early_stop + 1

		net.plot_cost()  # plot training cost

		plt.figure()  # plot accuracy
		plt.plot(np.arange(len(accu_train_age)), accu_train_age, label='train age', linestyle='--')
		plt.plot(np.arange(len(accu_valid_age)), accu_valid_age, label='valid age', linestyle='-')
		plt.plot(np.arange(len(accu_test_age)), accu_test_age, label='test age', linestyle=':')
		plt.ylabel('age accuracy')
		plt.xlabel('epoch')
		plt.legend(loc='lower right')
		plt.grid()
		plt.savefig('age.png')
Example #14
def standardSingleTest(DeployBlock, generateNet=True):
    '''
    @brief Create Testing-mode wrapper for a Deploy-mode net
    
    @param DeployBlock Deploy-mode Block(..) function
    @param generateNet IFF TRUE, a network prototxt will be made and printed
    
    @returns Testing-mode Block(..) function that adds a data layer to a Deploy-mode
             net
    '''
    def Block(net,
              datasetName,
              output,
              data_size,
              basename,
              prefix=None,
              use_augmentation_mean=True):
        '''
        @brief Add a data layer for dataset "datasetName" to a network
        
        @param net Incomplete network definition
        @param datasetName Name of the desired dataset (see Dataset.py)
        @param output IFF TRUE, the network will write its input and output to disk
        @param prefix Filename prefix for file outputs if "output" is TRUE
        @param use_augmentation_mean IFF TRUE, data mean will be computed on the fly
        '''
        blobs = net.namedBlobs()
        ## Make data layer
        #dataset = Dataset.get(name=datasetName, phase='TEST')

        #img0, img1, flow_gt = dataset.flowLayer(net)
        img0 = net.addInput(1, 3, data_size[0], data_size[1])
        img1 = net.addInput(1, 3, data_size[0], data_size[1])
        flow_gt = net.zeros(1, 2, data_size[0], data_size[1])

        ## Connect data to Deploy-mode net
        flow_pred = DeployBlock(net, img0, img1, flow_gt, data_size[1],
                                data_size[0], None, use_augmentation_mean)

        ## Output network input and output
        if output:
            if prefix:
                out_path = 'output_%s_%s' % (prefix, datasetName)
            else:
                out_path = 'output_%s' % datasetName
            if not os.path.isdir(out_path):
                os.makedirs(out_path)

            ## Write configuration file for viewer tool
            with open('%s/viewer.cfg' % out_path, 'w') as f:
                f.write('3 2\n')
                f.write('0 0 -img0.ppm\n')
                f.write('1 0 -img1.ppm\n')
                f.write('2 0 EPE(-flow.flo,-gt.flo)\n')
                f.write('0 1 -flow.flo\n')
                f.write('1 1 -gt.flo\n')
                f.write('2 1 DIFF(-flow.flo,-gt.flo)\n')

            ## Create network file outputs
            net.writeImage(img0,
                           folder=out_path,
                           filename=(os.path.join(out_path,
                                                  basename + '-imgL.ppm')))
            net.writeImage(img1,
                           folder=out_path,
                           filename=(os.path.join(out_path,
                                                  basename + '-imgR.ppm')))
            net.writeFloat(flow_pred,
                           folder=out_path,
                           filename=(os.path.join(out_path,
                                                  basename + '-flow.float3')))

    if generateNet:
        net = Network()

        dataset = param('dataset')
        if dataset is None:
            raise Exception('please specify dataset=...')
        dataset = str(dataset)

        use_augmentation_mean = bool(
            param('use_augmentation_mean', default=True))
        output = bool(param('output', default=False))
        prefix = param('prefix', default=None)

        basename = str(param('basename', default=''))
        height = int(param('height', default=-1))
        width = int(param('width', default=-1))
        assert use_augmentation_mean  # Must be used because we don't have mean color

        Block(net, dataset, output, (height, width), basename, prefix,
              use_augmentation_mean)

        print(net.toProto())

    return Block
Example #15
                        dest='synapse',
                        type=str,
                        default=None,
                        help='Load from a previously saved network.')
    parser.add_argument('--out',
                        dest='outFile',
                        type=str,
                        default=None,
                        help='Output image with box classifications.')
    parser.add_argument('image', help='Image to classify')
    options = parser.parse_args()

    # load everything into memory
    image = readImage(options.image)

    network = Network(options.synapse)

    # perform classification for multiple objects
    if options.multi:
        classification, confidence = createClassMap(network, image)
    else:
        classification, confidence = singleClassify(network, image)

    # write a product if it was asked for
    if options.outFile is not None:
        from PIL import Image
        confidence = np.ndarray.astype(normalize(confidence) * 255,
                                       dtype='uint8')
        img = Image.fromarray(confidence, mode='L')
        img.save(options.outFile)
Example #16
def solvers(rank, ngpus_per_node, args):
    if rank == 0:
        logger = create_logger()
        logger.info('Running distributed data parallel on {} gpus.'.format(
            args.world_size))
    torch.cuda.set_device(rank)
    torch.distributed.init_process_group(backend='nccl',
                                         init_method=args.init_method,
                                         world_size=args.world_size,
                                         rank=rank)
    # set initial value
    start_epoch = 0
    best_ssim = 0.0
    # model
    model = Network(num_layers=args.num_layers, rank=rank)
    # whether load checkpoint
    if args.pretrained or args.mode == 'test':
        model_path = os.path.join(args.model_save_path,
                                  'best_checkpoint.pth.tar')
        assert os.path.isfile(model_path)
        checkpoint = torch.load(model_path,
                                map_location='cuda:{}'.format(rank))
        start_epoch = checkpoint['epoch']
        lr = checkpoint['lr']
        args.lr = lr
        best_ssim = checkpoint['best_ssim']
        model.load_state_dict(checkpoint['model'])
        if rank == 0:
            logger.info('Load checkpoint at epoch {}.'.format(start_epoch))
            logger.info('Current learning rate is {}.'.format(lr))
            logger.info(
                'Current best ssim in train phase is {}.'.format(best_ssim))
            logger.info('The model is loaded.')
    elif args.use_init_weights:
        init_weights(model, init_type=args.init_type, gain=args.gain)
        if rank == 0:
            logger.info('Initialize model with {}.'.format(args.init_type))
    model = model.to(rank)
    model = DDP(model, device_ids=[rank])

    # criterion, optimizer, learning rate scheduler
    criterion = nn.MSELoss()
    optimizer = optim.Adam(model.parameters(), lr=args.lr)
    if not args.pretrained:
        warm_up = lambda epoch: epoch / args.warmup_epochs if epoch <= args.warmup_epochs else 1
        scheduler_wu = torch.optim.lr_scheduler.LambdaLR(optimizer=optimizer,
                                                         lr_lambda=warm_up)
    scheduler_re = torch.optim.lr_scheduler.ReduceLROnPlateau(
        optimizer=optimizer, mode='max', factor=0.3, patience=20)
    early_stopping = EarlyStopping(patience=50, delta=1e-5)

    # test step
    if args.mode == 'test':
        test_set = Dataset(args.test_path, args.u_mask_path,
                           args.s_mask_up_path, args.s_mask_down_path,
                           args.test_sample_rate)
        test_loader = DataLoader(dataset=test_set,
                                 batch_size=args.batch_size,
                                 shuffle=False,
                                 pin_memory=True)
        if rank == 0:
            logger.info('The size of test dataset is {}.'.format(
                len(test_set)))
            logger.info('Now testing {}.'.format(args.exp_name))
        model.eval()
        with torch.no_grad():
            test_log = []
            start_time = time.time()
            test_log = forward('test', rank, model, test_loader, criterion,
                               optimizer, test_log, args)
            test_time = time.time() - start_time
        # test information
        test_loss = test_log[0]
        test_psnr = test_log[1]
        test_ssim = test_log[2]
        if rank == 0:
            logger.info(
                'time:{:.5f}s\ttest_loss:{:.7f}\ttest_psnr:{:.5f}\ttest_ssim:{:.5f}'
                .format(test_time, test_loss, test_psnr, test_ssim))
        return

    # training step
    train_set = Dataset(args.train_path, args.u_mask_path, args.s_mask_up_path,
                        args.s_mask_down_path, args.train_sample_rate)
    train_sampler = DistributedSampler(train_set)
    train_loader = DataLoader(dataset=train_set,
                              batch_size=args.batch_size,
                              shuffle=(train_sampler is None),
                              pin_memory=True,
                              sampler=train_sampler)
    val_set = Dataset(args.val_path, args.u_mask_path, args.s_mask_up_path,
                      args.s_mask_down_path, args.val_sample_rate)
    val_loader = DataLoader(dataset=val_set,
                            batch_size=args.batch_size,
                            shuffle=False,
                            pin_memory=True)
    if rank == 0:
        logger.info(
            'The size of training dataset and validation dataset is {} and {}, respectively.'
            .format(len(train_set), len(val_set)))
        logger.info('Now training {}.'.format(args.exp_name))
        writer = SummaryWriter(args.loss_curve_path)
    for epoch in range(start_epoch + 1, args.num_epochs + 1):
        train_sampler.set_epoch(epoch)
        train_log = [epoch]
        epoch_start_time = time.time()
        model.train()
        train_log = forward('train', rank, model, train_loader, criterion,
                            optimizer, train_log, args)
        model.eval()
        with torch.no_grad():
            train_log = forward('val', rank, model, val_loader, criterion,
                                optimizer, train_log, args)
        epoch_time = time.time() - epoch_start_time
        # train information
        epoch = train_log[0]
        train_loss = train_log[1]
        lr = train_log[2]
        val_loss = train_log[3]
        val_psnr = train_log[4]
        val_ssim = train_log[5]

        is_best = val_ssim > best_ssim
        best_ssim = max(val_ssim, best_ssim)
        if rank == 0:
            logger.info(
                'epoch:{:<8d}time:{:.5f}s\tlr:{:.8f}\ttrain_loss:{:.7f}\tval_loss:{:.7f}\tval_psnr:{:.5f}\t'
                'val_ssim:{:.5f}'.format(epoch, epoch_time, lr, train_loss,
                                         val_loss, val_psnr, val_ssim))
            writer.add_scalars('loss', {
                'train_loss': train_loss,
                'val_loss': val_loss
            }, epoch)
            # save checkpoint
            checkpoint = {
                'epoch': epoch,
                'lr': lr,
                'best_ssim': best_ssim,
                'model': model.module.state_dict()
            }
            if not os.path.exists(args.model_save_path):
                os.makedirs(args.model_save_path)
            model_path = os.path.join(args.model_save_path,
                                      'checkpoint.pth.tar')
            best_model_path = os.path.join(args.model_save_path,
                                           'best_checkpoint.pth.tar')
            torch.save(checkpoint, model_path)
            if is_best:
                shutil.copy(model_path, best_model_path)
        # scheduler
        if epoch <= args.warmup_epochs and not args.pretrained:
            scheduler_wu.step()
        scheduler_re.step(val_ssim)
        early_stopping(val_ssim, loss=False)
        if early_stopping.early_stop:
            if rank == 0:
                logger.info('The experiment is early stop!')
            break
    if rank == 0:
        writer.close()
    return
Example #17
print(dataset_size)
indices = list(range(dataset_size))
split = int(np.floor(validation_split * dataset_size))
np.random.shuffle(indices)
train_indices, val_indices = indices[split:], indices[:split]

train_sampler = SubsetRandomSampler(train_indices)
valid_sampler = SubsetRandomSampler(val_indices)

train_loader = torch.utils.data.DataLoader(dataset,
                                           batch_size=32,
                                           sampler=train_sampler)
valid_loader = torch.utils.data.DataLoader(dataset,
                                           batch_size=32,
                                           sampler=valid_sampler)
model = Network()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
criterion = nn.CrossEntropyLoss()

train_iter = iter(train_loader)
for e in range(num_epoch):
    try:
        inputs, _ = next(train_iter)
    except StopIteration:
        train_iter = iter(train_loader)
        inputs, _ = next(train_iter)

    #inputs = inputs.to(device)
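The `try/except StopIteration` above restarts the loader by hand whenever it runs dry; a minimal generator sketch packaging the same pattern (unlike `itertools.cycle`, it reshuffles on every pass when the loader shuffles):

def infinite_batches(loader):
    # Yield batches forever, re-creating the iterator (and thereby
    # reshuffling, if the loader shuffles) whenever it is exhausted.
    while True:
        for batch in loader:
            yield batch

# Usage sketch:
#   batches = infinite_batches(train_loader)
#   inputs, _ = next(batches)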
Example #18
def standardDeploy(NetworkBlock, generateNet=True):
    '''
    @brief Create Deploy-mode wrapper for a raw net
    
    @param NetworkBlock Block(..) function of a raw network
    @param generateNet IFF TRUE, a network prototxt will be made and printed
    
    @returns Deploy-mode Block(..) function that adds data preprocessing and results postprocessing to a raw network
    '''
    def Block(net,
              img0,
              img1,
              flow_gt,
              width,
              height,
              mean_color,
              augmentation_mean=True):
        '''
        @brief Add data preprocessing to a network and connect data inputs
        
        @param net Incomplete network definition
        @param img0 Optical flow: First image
        @param img1 Optical flow: Second image
        @param flow_gt Optical flow: Flow groundtruth
        @param width Input width (pixels)
        @param height Input height (pixels)
        @param mean_color Data mean to be subtracted from data if "augmentation_mean" is FALSE
        @param augmentation_mean IFF TRUE, data mean will be computed on the fly
        
        @returns Optical flow prediction layer of the network "net"
        '''
        blobs = net.namedBlobs()
        ## Connect inputs
        blobs.img0 = img0
        blobs.img1 = img1
        blobs.flow_gt = flow_gt
        ## Rescale input images to [0,1]
        blobs.img0s = net.imageToRange01(blobs.img0)
        blobs.img1s = net.imageToRange01(blobs.img1)

        ## Subtract given mean or connect mean computation layer
        if augmentation_mean:
            blobs.img0_nomean = net.subtractAugmentationMean(blobs.img0s,
                                                             name="img0s_aug",
                                                             width=width,
                                                             height=height)
            blobs.img1_nomean = net.subtractAugmentationMean(blobs.img1s,
                                                             name="img1s_aug",
                                                             width=width,
                                                             height=height)
        else:
            blobs.img0_nomean = net.subtractMean(blobs.img0s, mean_color)
            blobs.img1_nomean = net.subtractMean(blobs.img1s, mean_color)

        ## Resample input data (needs to be 64-pixels aligned)
        divisor = 64.
        temp_width = ceil(width / divisor) * divisor
        temp_height = ceil(height / divisor) * divisor
        rescale_coeff_x = width / temp_width
        rescale_coeff_y = height / temp_height

        blobs.img0_nomean_resize = net.resample(blobs.img0_nomean,
                                                width=temp_width,
                                                height=temp_height,
                                                type='LINEAR',
                                                antialias=True)
        blobs.img1_nomean_resize = net.resample(blobs.img1_nomean,
                                                width=temp_width,
                                                height=temp_height,
                                                type='LINEAR',
                                                antialias=True)
        ## Use NEAREST here, since KITTI groundtruth is sparse
        blobs.flow_gt_resize = net.resample(blobs.flow_gt,
                                            width=temp_width,
                                            height=temp_height,
                                            type='NEAREST',
                                            antialias=True)

        ## Connect data preprocessing layers to raw net
        from net import Block as Network
        prediction = NetworkBlock(net, blobs.img0_nomean_resize,
                                  blobs.img1_nomean_resize,
                                  blobs.flow_gt_resize)

        ## Resample net output to input resolution
        blobs.predict_flow_resize = net.resample(prediction,
                                                 width=width,
                                                 height=height,
                                                 reference=None,
                                                 type='LINEAR',
                                                 antialias=True)
        blobs.predict_flow_final = net.scale(
            blobs.predict_flow_resize, (rescale_coeff_x, rescale_coeff_y))

        ## Connect L1 flow loss layer
        epe_loss = Layers.L1Loss(net,
                                 (blobs.flow_gt, blobs.predict_flow_final),
                                 nout=1,
                                 loss_weight=(1, ),
                                 name='flow_epe',
                                 l2_per_location=True,
                                 normalize_by_num_entries=True,
                                 epsilon=0)
        epe_loss.setName('flow_epe')
        epe_loss.enableOutput()

        return blobs.predict_flow_final

    if generateNet:
        net = Network()
        Block(net)
        print(net.toProto())

    return Block
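The resampling block above aligns inputs to multiples of 64 pixels and later rescales the predicted flow back; a minimal sketch of just that arithmetic:

from math import ceil

def aligned_size(width, height, divisor=64.0):
    # Round spatial dims up to the next multiple of `divisor` and return
    # the coefficients needed to rescale predictions back afterwards.
    temp_width = ceil(width / divisor) * divisor
    temp_height = ceil(height / divisor) * divisor
    return ((int(temp_width), int(temp_height)),
            (width / temp_width, height / temp_height))

# e.g. aligned_size(1241, 376) -> ((1280, 384), (0.9695..., 0.9791...))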
Example #19
def train(args):
    """Train
    """
    if not os.path.exists(args.save_path):
        os.makedirs(args.save_path)

    net = Network(args.vocab_size, args.emb_size, args.hidden_size)

    train_program = fluid.Program()
    train_startup = fluid.Program()
    if "CE_MODE_X" in os.environ:
        train_program.random_seed = 110
        train_startup.random_seed = 110
    with fluid.program_guard(train_program, train_startup):
        with fluid.unique_name.guard():
            logits, loss = net.network(args.loss_type)
            loss.persistable = True
            logits.persistable = True
            # gradient clipping
            fluid.clip.set_gradient_clip(
                clip=fluid.clip.GradientClipByValue(max=1.0, min=-1.0))

            optimizer = fluid.optimizer.Adam(learning_rate=args.learning_rate)
            optimizer.minimize(loss)
            print("begin memory optimization ...")
            fluid.memory_optimize(train_program)
            print("end memory optimization ...")

    test_program = fluid.Program()
    test_startup = fluid.Program()
    if "CE_MODE_X" in os.environ:
        test_program.random_seed = 110
        test_startup.random_seed = 110
    with fluid.program_guard(test_program, test_startup):
        with fluid.unique_name.guard():
            logits, loss = net.network(args.loss_type)
            loss.persistable = True
            logits.persistable = True

    test_program = test_program.clone(for_test=True)
    if args.use_cuda:
        place = fluid.CUDAPlace(0)
        dev_count = fluid.core.get_cuda_device_count()
    else:
        place = fluid.CPUPlace()
        dev_count = int(os.environ.get('CPU_NUM', multiprocessing.cpu_count()))

    print("device count %d" % dev_count)
    print("theoretical memory usage: ")
    print(
        fluid.contrib.memory_usage(program=train_program,
                                   batch_size=args.batch_size))

    exe = fluid.Executor(place)
    exe.run(train_startup)
    exe.run(test_startup)

    train_exe = fluid.ParallelExecutor(use_cuda=args.use_cuda,
                                       loss_name=loss.name,
                                       main_program=train_program)

    test_exe = fluid.ParallelExecutor(use_cuda=args.use_cuda,
                                      main_program=test_program,
                                      share_vars_from=train_exe)

    if args.word_emb_init is not None:
        print("start loading word embedding init ...")
        if six.PY2:
            word_emb = np.array(pickle.load(open(args.word_emb_init,
                                                 'rb'))).astype('float32')
        else:
            word_emb = np.array(
                pickle.load(open(args.word_emb_init, 'rb'),
                            encoding="bytes")).astype('float32')
        net.set_word_embedding(word_emb, place)
        print("finish init word embedding  ...")

    print("start loading data ...")

    def train_with_feed(batch_data):
        """
        Train on one batch
        """
        # TODO: get_feed_names
        feed_dict = dict(zip(net.get_feed_names(), batch_data))

        cost = train_exe.run(feed=feed_dict, fetch_list=[loss.name])
        return cost[0]

    def test_with_feed(batch_data):
        """
        Test on one batch
        """
        feed_dict = dict(zip(net.get_feed_names(), batch_data))

        score = test_exe.run(feed=feed_dict, fetch_list=[logits.name])
        return score[0]

    def evaluate():
        """
        Evaluate to choose model
        """
        val_batches = reader.batch_reader(args.val_path, args.batch_size,
                                          place, args.max_len, 1)
        scores = []
        labels = []
        for batch in val_batches:
            scores.extend(test_with_feed(batch))
            labels.extend([x[0] for x in batch[2]])

        return eva.evaluate_Recall(zip(scores, labels))

    def save_exe(step, best_recall):
        """
        Save exe conditional
        """
        recall_dict = evaluate()
        print('evaluation recall result:')
        print('1_in_2: %s\t1_in_10: %s\t2_in_10: %s\t5_in_10: %s' %
              (recall_dict['1_in_2'], recall_dict['1_in_10'],
               recall_dict['2_in_10'], recall_dict['5_in_10']))

        if recall_dict['1_in_10'] > best_recall and step != 0:
            fluid.io.save_inference_model(args.save_path,
                                          net.get_feed_inference_names(),
                                          logits,
                                          exe,
                                          main_program=train_program)

            print("Save model at step %d ... " % step)
            print(
                time.strftime('%Y-%m-%d %H:%M:%S',
                              time.localtime(time.time())))
            best_recall = recall_dict['1_in_10']
        return best_recall

    # train over different epochs
    global_step, train_time = 0, 0.0
    best_recall = 0
    for epoch in six.moves.xrange(args.num_scan_data):
        train_batches = reader.batch_reader(args.train_path, args.batch_size,
                                            place, args.max_len,
                                            args.sample_pro)

        begin_time = time.time()
        sum_cost = 0
        ce_cost = 0
        for batch in train_batches:
            if (args.save_path is not None) and (global_step % args.save_step
                                                 == 0):
                best_recall = save_exe(global_step, best_recall)

            cost = train_with_feed(batch)
            global_step += 1
            sum_cost += cost.mean()
            ce_cost = cost.mean()

            if global_step % args.print_step == 0:
                print('training step %s avg loss %s' %
                      (global_step, sum_cost / args.print_step))
                sum_cost = 0

        pass_time_cost = time.time() - begin_time
        train_time += pass_time_cost
        print("Pass {0}, pass_time_cost {1}".format(
            epoch, "%2.2f sec" % pass_time_cost))
        if "CE_MODE_X" in os.environ and epoch == args.num_scan_data - 1:
            card_num = get_cards()
            print("kpis\ttrain_duration_card%s\t%s" %
                  (card_num, pass_time_cost))
            print("kpis\ttrain_loss_card%s\t%s" % (card_num, ce_cost))
Example #20
from player import Player

pygame.font.init()

WIDTH, HEIGHT = 1280, 720
WIN = pygame.display.set_mode((WIDTH, HEIGHT))
pygame.display.set_caption("game_a")

BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)

DISCONNECT_MESSAGE = "!DISCONNECT"
n = Network(socket.gethostbyname(socket.gethostname()))
name = "PGCLIENT"
first = Message(name, "CONNECTED", "default", time.time())
n.connect(first)

last_message = None


def main():
    run = True
    FPS = 60

    main_font = pygame.font.SysFont("Arial", 24)

    clock = pygame.time.Clock()
Example #21
    get_coefs(*o.split(" ")) for o in open(EMBEDDING_FILE, encoding='UTF-8'))

all_embs = np.stack(embeddings_index.values())
emb_mean, emb_std = all_embs.mean(), all_embs.std()
embed_size = all_embs.shape[1]
word_index = tokenizer.word_index
nb_words = min(max_features, len(word_index))
embedding_matrix = np.random.normal(emb_mean, emb_std, (nb_words, embed_size))
for word, i in word_index.items():
    if i >= max_features: continue
    embedding_vector = embeddings_index.get(word)
    if embedding_vector is not None: embedding_matrix[i] = embedding_vector
filter_sizes = [1, 2, 3, 5]
num_filters = 36

net = Network(embedding_matrix, max_features, embed_size, maxlen, num_filters,
              filter_sizes)
for m in net.modules():
    if isinstance(m, nn.Conv2d):
        nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
optimizer = torch.optim.Adam(net.parameters(),
                             lr=0.0001)  # optimize all cnn parameters
loss_func = nn.CrossEntropyLoss()
#train
print('training')
batch_size = 512

length = train_X.shape[0]
for step in range(int(length / batch_size)):
    dx = train_X[batch_size * step:batch_size * (step + 1)]
    dy = np.array([
        train_y[batch_size * step:batch_size * (step + 1)]
Example #22
def main():
    ##  Network initialize  ##
    net = Network(classes=2, arch=args.arch)  # default number of classes is 2
    #net.load_state_dict(torch.load('./model/cs_globalmean/model_10.pth'))
    #print('Load model successfully')

    ##  define loss function (criterion) and optimizer  ##
    criterion = nn.CosineEmbeddingLoss().cuda()
    optimizer = torch.optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay = 1e-4)

    ##  Data loading  ##
    traindir = os.path.join(args.train_data, 'train')
    valpdir   = os.path.join(args.test_data, 'pocket')
    valldir   = os.path.join(args.test_data, 'ligand')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    num_classes = len([name for name in os.listdir(traindir)]) - 1
    print("num_classes = '{}'".format(num_classes))

    train_data = datasets.ImageFolder(  ## train/tdata, fdata
        traindir,
        transforms.Compose([
            transforms.ToTensor(),  ## (height x width, channel),(0-255) -> (channel x height x width),(0.0-1.0)
            normalize,              ## RGB normalization
        ]))
    train_loader = torch.utils.data.DataLoader(dataset=train_data,
                                            batch_size=args.batch_size,
                                            shuffle=False,
                                            num_workers=args.workers)

    val_pdata = datasets.ImageFolder(  ## val/pocket
        valpdir,
        transforms.Compose([
            transforms.ToTensor(),
            normalize,
        ]))
    val_ploader = torch.utils.data.DataLoader(dataset=val_pdata,
                                            batch_size=20,  # batch-size for test
                                            shuffle=False,
                                            num_workers=args.workers)

    val_ldata = datasets.ImageFolder(  ## val/ligand
        valldir,
        transforms.Compose([
            transforms.ToTensor(),
            normalize,
        ]))
    val_lloader = torch.utils.data.DataLoader(dataset=val_ldata,
                                            batch_size=20,  # batch-size for test
                                            shuffle=False,
                                            num_workers=args.workers)

    ##  Train  ##
    print(('Start training: lr %f, batch size %d, classes %d'%(args.lr, args.batch_size, num_classes)))
    steps = args.start_epoch
    iter_per_epoch = args.batch_size//40

    imgs = []
    lbls = []
    image_list = []
    label_list = []
    for i, (images, labels) in enumerate(train_loader):
        imgs.append(images)
        lbls.append(labels)

    shuffle_list = [i*40 for i in range(iter_per_epoch*len(imgs))]
    random.shuffle(shuffle_list)

    list_length = iter_per_epoch*len(imgs)
    for i in range(list_length):
        s = shuffle_list[i]//args.batch_size
        f = shuffle_list[i]%args.batch_size
        image_list.append(imgs[s][f:f+40])
        label_list.append(lbls[s][f:f+40])

    init_numdict = {}
    numlist = []
    for i in range(list_length):
        if label_list[i][0].tolist()==0:
            numlist.append(i)

    for i in range(int(list_length/2)):
        init_numdict[i] = numlist[i]

    for epoch in range(args.start_epoch, args.epochs):
        #if (epoch+1)%(int(list_length/2)-1)==0 and epoch>args.start_epoch:
        if epoch%1==0 and epoch>19:
            path = modelpath + 'model_' + str(epoch) + '.pth'
            torch.save(net.state_dict(), path)
            print('>>>>>Save model successfully<<<<<')

        loss = 0
        sum_loss = 0

        if (epoch+1)%(int(list_length/2)-1)==0 and epoch>0:
            image_list, init_numdict = shuffle_fpair(image_list, label_list, list_length, init_numdict, 1)
            print('Shuffle mode >>>> 1')
        else:
            image_list, init_numdict = shuffle_fpair(image_list, label_list, list_length, init_numdict, 0)
            print('Shuffle mode >>>> 0')

        image_list, label_list, init_numdict = shuffle_set(image_list, label_list, list_length, init_numdict)

        for i, (images, labels) in enumerate(zip(image_list, label_list)):
            images = Variable(image_list[i])
            labels = Variable(label_list[i])

            # Forward + Backward + Optimize
            label = torch.tensor([labels[0]])
            label = label*2-1

            optimizer.zero_grad()

            output_lig, output_poc = net(images, 'train', 'max', 'max')

            sim = cos(output_lig, output_poc)
            loss += criterion(output_lig, output_poc, label.type_as(output_lig))

            if (i+1)%(iter_per_epoch)==0 and i>0:
                loss /= iter_per_epoch  ## calculate loss average
                sum_loss += loss
                print('Epoch: %2d, iter: %2d, Loss: %.4f' %(epoch, i+1, loss))
                if (i+1)==list_length:
                    print('>>>Epoch: %2d, Train_Loss: %.4f' %(epoch, sum_loss/list_length*iter_per_epoch))
                    sum_loss = 0
                    #test(net, val_ploader, val_lloader, epoch)
                loss.backward()
                optimizer.step()
                loss = 0
Example #23
    print(
        "Top1:{:>5.2f}%  Top2:{:>5.2f}%  Top3:{:>5.2f}%".format(
            top1_acc * 100, top2_acc * 100, top3_acc * 100
        )
    )
    pbar.close()


if __name__ == "__main__":
    args = parse_args()
    update_config(cfg, args)

    test_set = eval(cfg.DATASET.DATASET)("valid", cfg)
    num_classes = test_set.get_num_classes()
    device = torch.device("cpu" if cfg.CPU_MODE else "cuda")
    model = Network(cfg, mode="test", num_classes=num_classes)

    model_dir = os.path.join(cfg.OUTPUT_DIR, cfg.NAME, "models")
    model_file = cfg.TEST.MODEL_FILE
    if "/" in model_file:
        model_path = model_file
    else:
        model_path = os.path.join(model_dir, model_file)
    model.load_model(model_path)

    if cfg.CPU_MODE:
        model = model.to(device)
    else:
        model = torch.nn.DataParallel(model).cuda()

    testLoader = DataLoader(
Example #24
import sys
import time
import socket
from net import Network
from message import Message

ip = input("Server: ")
if ip == "":
    ip = socket.gethostbyname(socket.gethostname())

name = input("Username: "******"!DISCONNECT"

n = Network(ip)
first = Message(name, "test", "default", time.time())
n.connect(first)


def send_msg(msg):
    m = Message(name, str(msg), "default", time.time())
    return n.send(m)


def main():
    while True:
        send_msg(str(input()))


if __name__ == '__main__':
    try:
        main()
Example #25
import numpy as np

import load_data as ld
from net import Network
from plots import plotLC

ALPHA = 1e-2
BATCH_SIZE = 20
NITER = 12000

# load test data
test = np.load('test.npy')
test_labels = ld.one_hot(np.load('test_labels.npy'))

# initialize network
n = Network(ALPHA)
# load training and validation datasets
D = ld.DataSet()

train_accuracies = []
batch_accuracies = []
valid_batch_accuracies = []
valid_accuracies = []

# train the network
for i in range(NITER):
    batch = D.next_batch(BATCH_SIZE)
    batch_accuracies.append(n.getAccuracy(batch[0], batch[1]))
    if i > 0 and i % (D.get_trainsize() / BATCH_SIZE) == 0:
        train_accuracies.append(sum(batch_accuracies) / len(batch_accuracies))
        for j in range(int(D.get_validsize() / BATCH_SIZE)):
Example #26
def main():
    ##  Network initialize  ##
    net = Network()                              ## default number of classes is 2
    model_path = args.model
    print(model_path)
    net.load_state_dict(torch.load(model_path))  ## load trained model
    print('Load model Successfully')
    net.eval()

    ##  Data loading  ##
    pocdir = os.path.join(args.data, 'pocket')
    ligdir = os.path.join(args.data, 'ligand')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    num_classes = len([name for name in os.listdir(pocdir)]) - 1
    print("num_classes = '{}'".format(num_classes))

    pocket_data = datasets.ImageFolder(  ## pocket
        pocdir,
        transforms.Compose([
            transforms.ToTensor(),
            normalize,
        ]))
    pocket_loader = torch.utils.data.DataLoader(dataset=pocket_data,
                                            batch_size=args.batch_size,
                                            shuffle=False,
                                            num_workers=args.workers)

    ligand_data = datasets.ImageFolder(  ## ligand
        ligdir,
        transforms.Compose([
            transforms.ToTensor(),
            normalize,
        ]))
    ligand_loader = torch.utils.data.DataLoader(dataset=ligand_data,
                                            batch_size=args.batch_size,
                                            shuffle=False,
                                            num_workers=args.workers)

    ##  Virtual Screening  ##
    print('###   Start Screening   ###')
    batch_time, net_time = [], []
    sum_time = 0
    top01, top05, top10 = 0, 0, 0
    for i, ((pimage, plabel), pid) in enumerate(zip(pocket_loader, pdbids)):
        start_time = time()

        poc_image = Variable(pimage)
        poc_label = Variable(plabel[0])
        output_poc = net(poc_image, 'pocket', args.global_pooling, args.pooling)
        print('PDB ID: ', pid.replace('\n',''))
        outputs = {}
        results = {}
        for j, (limage, llabel) in enumerate(ligand_loader):
            lig_image = Variable(limage)
            lig_label = Variable(llabel[0])

            ## Estimate similarity
            output_lig = net(lig_image, 'ligand', args.global_pooling, args.pooling)
            sim = cos(output_lig, output_poc)
            outputs[lig_label.tolist()] = sim

            ## Calculate accuracy
            if j==57:
                sortdic = sorted(outputs.items(), key=lambda x:x[1], reverse=True)
                top10_label = [l[0] for l in sortdic[0:10]]
                result = [s[0] for s in sortdic]
                #print(result)
                print(top10_label)
                topn = find_topn(result, poc_label.tolist())
                print('No. %2d  finds Top %2d' %(poc_label.tolist(), topn))
                prec01, prec05, prec10 = caltop10(poc_label.tolist(), top10_label)
                top01 += prec01
                top05 += prec05
                top10 += prec10
        sum_time += time()-start_time
        print('Top1: %.2f%%, Top5: %.2f%%, Top10: %.2f%%\n' %(top01/(i+1)*100, top05/(i+1)*100, top10/(i+1)*100))

    print('\n\n###   Virtual Screening for %2d proteins   ###' %(i+1))
    print('Top1 Accuracy: %.4f%%, Top5 Accuracy: %.4f%%, Top10 Accuracy: %.4f%%' %(top01/(i+1)*100, top05/(i+1)*100, top10/(i+1)*100))
    print(sum_time)
    pdbids.close()
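`cos` is defined outside this snippet; a minimal sketch, assuming it is the standard cosine-similarity module applied over the feature dimension:

import torch.nn as nn

# Cosine similarity along dim 1, as used for `sim` above.
cos = nn.CosineSimilarity(dim=1, eps=1e-6)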
Example #27
def finetune(args):
    """
    Finetune
    """
    if not os.path.exists(args.save_path):
        os.makedirs(args.save_path)

    net = Network(args.vocab_size, args.emb_size, args.hidden_size)

    train_program = fluid.Program()
    train_startup = fluid.Program()
    if "CE_MODE_X" in os.environ:
        train_program.random_seed = 110
        train_startup.random_seed = 110
    with fluid.program_guard(train_program, train_startup):
        with fluid.unique_name.guard():
            logits, loss = net.network(args.loss_type)
            loss.persistable = True
            logits.persistable = True
            # gradient clipping
            fluid.clip.set_gradient_clip(
                clip=fluid.clip.GradientClipByValue(max=1.0, min=-1.0))

            optimizer = fluid.optimizer.Adam(
                learning_rate=fluid.layers.exponential_decay(
                    learning_rate=args.learning_rate,
                    decay_steps=400,
                    decay_rate=0.9,
                    staircase=True))
            optimizer.minimize(loss)
            print("begin memory optimization ...")
            fluid.memory_optimize(train_program)
            print("end memory optimization ...")

    test_program = fluid.Program()
    test_startup = fluid.Program()
    if "CE_MODE_X" in os.environ:
        test_program.random_seed = 110
        test_startup.random_seed = 110
    with fluid.program_guard(test_program, test_startup):
        with fluid.unique_name.guard():
            logits, loss = net.network(args.loss_type)
            loss.persistable = True
            logits.persistable = True

    test_program = test_program.clone(for_test=True)
    if args.use_cuda:
        place = fluid.CUDAPlace(0)
        dev_count = fluid.core.get_cuda_device_count()
    else:
        place = fluid.CPUPlace()
        dev_count = int(os.environ.get('CPU_NUM', multiprocessing.cpu_count()))

    print("device count %d" % dev_count)
    print("theoretical memory usage: ")
    print(
        fluid.contrib.memory_usage(program=train_program,
                                   batch_size=args.batch_size))

    exe = fluid.Executor(place)
    exe.run(train_startup)
    exe.run(test_startup)

    train_exe = fluid.ParallelExecutor(use_cuda=args.use_cuda,
                                       loss_name=loss.name,
                                       main_program=train_program)

    test_exe = fluid.ParallelExecutor(use_cuda=args.use_cuda,
                                      main_program=test_program,
                                      share_vars_from=train_exe)

    if args.init_model:
        init.init_pretraining_params(exe,
                                     args.init_model,
                                     main_program=train_startup)
        print('successfully initialized from %s' % args.init_model)

    print("start loading data ...")

    def train_with_feed(batch_data):
        """
        Train on one batch
        """
        # TODO: get_feed_names
        feed_dict = dict(zip(net.get_feed_names(), batch_data))

        cost = train_exe.run(feed=feed_dict, fetch_list=[loss.name])
        return cost[0]

    def test_with_feed(batch_data):
        """
        Test on one batch
        """
        feed_dict = dict(zip(net.get_feed_names(), batch_data))

        score = test_exe.run(feed=feed_dict, fetch_list=[logits.name])
        return score[0]

    def evaluate():
        """
        Evaluate to choose model
        """
        val_batches = reader.batch_reader(args.val_path, args.batch_size,
                                          place, args.max_len, 1)
        scores = []
        labels = []
        for batch in val_batches:
            scores.extend(test_with_feed(batch))
            labels.extend([x[0] for x in batch[2]])
        scores = [x[0] for x in scores]
        return eva.evaluate_cor(scores, labels)

    def save_exe(step, best_cor):
        """
        Conditionally save the inference model when the evaluation correlation improves
        """
        cor = evaluate()
        print('evaluation correlation %s' % cor)
        if cor > best_cor and step != 0:
            fluid.io.save_inference_model(args.save_path,
                                          net.get_feed_inference_names(),
                                          logits,
                                          exe,
                                          main_program=train_program)
            print("Save model at step %d ... " % step)
            print(
                time.strftime('%Y-%m-%d %H:%M:%S',
                              time.localtime(time.time())))
            best_cor = cor
        return best_cor

    # train over different epochs
    global_step, train_time = 0, 0.0
    best_cor = 0.0
    pre_index = -1
    for epoch in six.moves.xrange(args.num_scan_data):
        train_batches = reader.batch_reader(args.train_path, args.batch_size,
                                            place, args.max_len,
                                            args.sample_pro)

        begin_time = time.time()
        sum_cost = 0
        for batch in train_batches:
            if (args.save_path is not None) and (global_step % args.save_step
                                                 == 0):
                best_cor = save_exe(global_step, best_cor)

            cost = train_with_feed(batch)
            global_step += 1
            sum_cost += cost.mean()

            if global_step % args.print_step == 0:
                print('training step %s avg loss %s' %
                      (global_step, sum_cost / args.print_step))
                sum_cost = 0

        pass_time_cost = time.time() - begin_time
        train_time += pass_time_cost
        print("Pass {0}, pass_time_cost {1}".format(
            epoch, "%2.2f sec" % pass_time_cost))
Beispiel #28
0
        # (continuation of a face-detection loop: gray, x, y, w, h come from
        # earlier, unshown code; crop the face region and resize it)
        roiimage = gray[y:y + h, x:x + w]
        roiimage = cv2.resize(roiimage, (128, 128),
                              interpolation=cv2.INTER_CUBIC)
        cv2.imwrite('./' + str(index) + '.jpg', roiimage)
        i += 1

    new_path = '/home/htkang/bigdata/age_gender/code/age_gender/joint/' + str(
        index) + '.jpg'
    test_image = cv2.imread(new_path)

    test_gray = cv2.cvtColor(test_image, cv2.COLOR_BGR2GRAY)  # test image

    net = Network(n_output_gen=2,
                  n_output_age=4,
                  n_length=128,
                  learning_rate=0.01,
                  batch_size=32,
                  channel=1,
                  output_graph=False,
                  use_ckpt=True)
    test_gray_1 = tf.expand_dims(test_gray, 0)  # add dimension
    test_gray_2 = tf.expand_dims(test_gray_1, -1)
    with tf.Session() as sess:
        input_image = sess.run(test_gray_2)

    gender, age = net.get_result(input_image)

    print("gender is:%d, age is:%d " % (gender, age))

    gender, age = transform(age, gender)

    font = cv2.cv.InitFont(cv2.cv.CV_FONT_HERSHEY_SIMPLEX, 1, 1, 0, 2, 1)
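
cv2.cv.InitFont only exists in the legacy OpenCV 2.x cv bindings and was removed in OpenCV 3. A sketch of the equivalent overlay with the current API, assuming the prediction is meant to be drawn onto test_image (text and position are illustrative):

label = 'gender: %s, age: %s' % (gender, age)
cv2.putText(test_image, label, (10, 30),
            cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 0), 2)  # green text, thickness 2
cv2.imwrite('annotated.jpg', test_image)  # hypothetical output path
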
Beispiel #29
0
#Test on approximating a sine curve

import numpy as np
from net import Network, train
import matplotlib
matplotlib.use('Agg')  # select the non-interactive backend before pyplot is imported
import matplotlib.pyplot as plt

iterations = 500

X = np.random.rand(1000, 1) * 2 * np.pi
y = (1 + np.sin(X)) / 2

net = Network(1, 1)
net, loss = train(net, X, y, iterations, 10)

plt.scatter(X, net.predict(X))
plt.xlabel('X')
plt.ylabel('Predicted y: sin(x)')
plt.savefig('sin2.png')

plt.clf()
plt.plot(np.arange(iterations), loss)
plt.xlabel('Iterations')
plt.ylabel('Loss')
plt.title('Sin Approximation Loss')
plt.savefig('sinloss2.png')
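
The scatter above shows only the network's outputs. Overlaying the target curve makes the quality of the fit visible at a glance; a small optional addition reusing X and net from the listing:

xs = np.sort(X, axis=0)  # sort so the reference line plots smoothly
plt.clf()
plt.scatter(X, net.predict(X), s=5, label='prediction')
plt.plot(xs, (1 + np.sin(xs)) / 2, 'r-', label='target (1 + sin x) / 2')
plt.legend()
plt.savefig('sin_compare.png')  # hypothetical filename
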
Beispiel #30
0
import random
import sys

import tensorflow as tf

tf.reset_default_graph()

train_data_200='/home/juliussurya/workspace/360pano/tfrecords/pano_FOVrand_200_outdoor'
train_data_full = '/home/juliussurya/workspace/360pano/tfrecords/pano_FOVrand_train_outdoor' 
val_data = '/home/juliussurya/workspace/360pano/tfrecords/pano_FOVrand_val_outdoor' 
test_data = '/home/juliussurya/workspace/360pano/tfrecords/pano_FOVrand_test_outdoor' 
graph_path = '/home/juliussurya/workspace/360pano/graph/fov/enc_dec_flow_gan'
model_path = '/home/juliussurya/workspace/360pano/checkpoint/'

if sys.argv[1] == 'train':
    dataset = train_data_200
elif sys.argv[1] == 'test':
    dataset = val_data  # note: 'test' mode currently loads the validation split

with tf.name_scope('ReadDataset'):
    net = Network()
    x, y, fov = net.readDataset(dataset, 2, 50)  # return output for checking

with tf.name_scope('FoVNet'):
    net.forwardFOV() # Run FOV network estimation
    net.lossFOV() # Compute loss FOV

net.forwardSmall() # Run single model network
net.forwardMed()
net.addSoftNoise(random.uniform(0.8, 1.1), random.uniform(0.0, 0.4))  # Add label noise
net.lossGANsmall()
net.lossGANmed()

with tf.name_scope('Minimizer'):
    # Learning rate , decay step
    lr_fov = [0.0001, 5000]