Example #1
File: HardNet.py  Project: keeeeenw/hardnet
def run(self):
    # set random seeds for reproducibility
    random.seed(self.args.seed)
    torch.manual_seed(self.args.seed)
    np.random.seed(self.args.seed)

    # ensure the base log directory exists, then point at a per-run subdirectory
    self.log_dir = self.args.log_dir
    if not os.path.isdir(self.log_dir):
        os.makedirs(self.log_dir)
    self.log_dir = os.path.join(self.args.log_dir, self.suffix)
    self.descs_dir = os.path.join(self.log_dir, 'temp_descs')
    if self.test_on_w1bs:
        if not os.path.isdir(self.descs_dir):
            os.makedirs(self.descs_dir)
    logger, file_logger = None, None
    print("Creating Model")
    if self.args.enable_logging:
        from Loggers import Logger, FileLogger
        logger = Logger(self.log_dir)
        #file_logger = FileLogger(./log/+self.suffix)
    print("Creating Loaders")
    train_loader, test_loaders = self.create_loaders(load_random_triplets=self.triplet_flag)

    print("Starting execution")
    self.execute(train_loader, test_loaders, self.model, logger, file_logger)
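For reference, the three seeding calls at the top of run() can be collected into one helper so every entry point seeds Python, PyTorch, and NumPy the same way. This is a minimal sketch, not part of the hardnet repo:

import random
import numpy as np
import torch

def seed_everything(seed):
    # Seed all three RNG sources used above so runs are reproducible.
    random.seed(seed)
    torch.manual_seed(seed)
    np.random.seed(seed)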
Example #2
def init_loggers():
    global loggers
    #loggers['Application'] = Logger('Application', logging.INFO, format_str="%(name)s - %(levelname)s - %(message)s")
    loggers['Application'] = SilentLogger()

    loggers['SignalSending'] = SilentLogger()
    #loggers['SignalSending'] = Logger('SignalSending', logging.INFO)

    loggers['Debug'] = Logger(
        'Debug',
        logging.DEBUG,
        format_str="%(name)s - %(levelname)s - %(message)s")
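Example #2 silences whole logging categories by swapping Logger for SilentLogger in one place. Assuming the two classes share the usual logging method names (the snippet shows neither class), SilentLogger is just a null object; a minimal sketch:

class SilentLogger:
    # Null-object stand-in: same call surface as a real logger, but
    # every method is a no-op, so call sites need no "enabled" checks.
    def debug(self, *args, **kwargs): pass
    def info(self, *args, **kwargs): pass
    def warning(self, *args, **kwargs): pass
    def error(self, *args, **kwargs): pass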
Example #3
	def train(self, epoch, doEval):
		logger = Logger("aaeInfo/log/")
		parameters_recon = list(self.encoder.parameters()) + list(self.decoder.parameters())
		optimizer_recon = torch.optim.SGD(parameters_recon, lr=1e-6, momentum=0.9)
		optimizer_discr = torch.optim.SGD(self.discriminator.parameters(), lr=1e-6, momentum=0.1)
		optimizer_gener = torch.optim.SGD(self.encoder.parameters(), lr=1e-6, momentum=0.1)
		# optimizer_recon = torch.optim.Adam(parameters_recon, betas=(0.9, 0.999), lr=1e-5)
		# optimizer_discr = torch.optim.Adam(self.discriminator.parameters(), betas=(0.1, 0.001), lr=1e-4)
		# optimizer_gener = torch.optim.Adam(self.encoder.parameters(), betas=(0.1, 0.001), lr=1e-4)

		best_loss = float("Inf")
		for e in range(epoch):
			sys.stdout.flush()
			print("Epoch %d" % e)
			for i,x in enumerate(self.train_loader):
				x = x.cuda()
				optimizer_recon.zero_grad()
				recon_loss, recon_acc1, recon_acc2 = self.reconstruct_loss(x)
				recon_loss.backward()
				optimizer_recon.step()
				
				
				optimizer_discr.zero_grad()
				discr_loss, discr_acc = self.dicriminate_loss(x)
				discr_loss.backward()
				optimizer_discr.step()
				
				optimizer_gener.zero_grad()
				fool_loss, fool_acc = self.fool_loss(x)
				fool_loss.backward()
				optimizer_gener.step()
				
				print("Batch No: %d, recon_loss: %f, recon_acc1: %f, recon_acc2: %f, discr_loss: %f, discr_acc: %f, fool_loss: %f, fool_acc: %f" 
				% (i+1, recon_loss, recon_acc1, recon_acc2, discr_loss, discr_acc, fool_loss, fool_acc))
				sys.stdout.flush()
				
				
			if doEval:
				recon_loss, recon_acc1, recon_acc2, discri_loss, discr_acc, fool_loss, fool_acc = self.eval(self.train_loader, len(self.train_dataset))
				print("Reconstruction Train Loss: %f, Rule Accuracy: %f, Mol Accuracy: %f" % (recon_loss, recon_acc1, recon_acc2))
				print("Discrimination Train Loss: %f, Accuracy: %f" % (discri_loss, discr_acc))
				print("Fool Train Loss: %f, Accuracy: %f" % (fool_loss, fool_acc))
				recon_loss, recon_acc1, recon_acc2, discri_loss, discr_acc, fool_loss, fool_acc = self.eval(self.eval_loader, len(self.eval_dataset))
				print("Reconstruction Valid Loss: %f, Rule Accuracy: %f, Mol Accuracy: %f" % (recon_loss, recon_acc1, recon_acc2))
				print("Discrimination Valid Loss: %f, Accuracy: %f" % (discri_loss, discr_acc))
				print("Fool Valid Loss: %f, Accuracy: %f" % (fool_loss, fool_acc))
				self.log_info(logger, recon_loss, recon_acc1, recon_acc2, discri_loss, discr_acc, fool_loss,  fool_acc)
				if recon_loss < best_loss:
					print("Save Model")
					best_loss = recon_loss
					self.save()
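The self.log_info call above is not shown in this snippet. Assuming the project-local Logger exposes a scalar_summary(tag, value, step) method (an assumption, borrowed from common TensorBoard-wrapper conventions), a plausible sketch is:

def log_info(self, logger, recon_loss, recon_acc1, recon_acc2,
             discr_loss, discr_acc, fool_loss, fool_acc, step=0):
    # Hypothetical: push each validation metric to the logger under
    # its own tag; scalar_summary(tag, value, step) is assumed.
    metrics = {'recon_loss': recon_loss, 'rule_acc': recon_acc1,
               'mol_acc': recon_acc2, 'discr_loss': discr_loss,
               'discr_acc': discr_acc, 'fool_loss': fool_loss,
               'fool_acc': fool_acc}
    for tag, value in metrics.items():
        logger.scalar_summary(tag, value, step)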
Example #4
def train(self, epoch, doEval):
    logger = Logger("vaeInfo/log/")
    parameters = list(self.encoder.parameters()) + list(
        self.decoder.parameters())
    optimizer = torch.optim.Adam(parameters, lr=1e-5)
    # optimizer = torch.optim.SGD(parameters, lr=1e-5, momentum=0.9)
    best_acc = 0
    for e in range(epoch):
        sys.stdout.flush()
        print("Epoch %d" % e)
        for i, x in enumerate(self.train_loader):
            x = x.cuda()
            optimizer.zero_grad()
            batch_loss, recon_loss, kl_loss, acc1, acc2 = self.loss(x)
            batch_loss.backward()
            optimizer.step()
            # print("Batch No: %d, loss: %f, recon_loss: %f, kl_loss: %f, acc1: %f, acc2: %f"
            # % (i+1, batch_loss, recon_loss, kl_loss, acc1, acc2))
            sys.stdout.flush()
        if doEval:
            all_loss, recon_loss, kl_loss, acc1, acc2 = self.eval(
                self.train_loader, len(self.train_dataset))
            print("Overall Training Loss: %f" % all_loss)
            print("Reconstruction Training Loss: %f" % recon_loss)
            print("KL Training Loss: %f" % kl_loss)
            print("Rule Accuracy Training: %f" % acc1)
            print("Mol Accuracy Training: %f" % acc2)
            all_loss, recon_loss, kl_loss, acc1, acc2 = self.eval(
                self.eval_loader, len(self.eval_dataset))
            print("Overall Validation Loss: %f" % all_loss)
            print("Reconstruction Validation Loss: %f" % recon_loss)
            print("KL Validation Loss: %f" % kl_loss)
            print("Rule Accuracy Validation: %f" % acc1)
            print("Mol Accuracy Validation: %f" % acc2)
            self.log_info(logger, all_loss, recon_loss, kl_loss, acc1,
                          acc2)
            if acc2 > best_acc:
                print("Save Model")
                best_acc = acc2
                self.save()
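self.loss is also not shown; for a VAE it typically combines a reconstruction term with the closed-form Gaussian KL divergence. A minimal sketch of that combination (the repo's actual loss, and its two accuracy terms, may differ):

import torch
import torch.nn.functional as F

def vae_loss(recon_x, x, mu, logvar):
    # Reconstruction error plus the analytic KL divergence between
    # the approximate posterior N(mu, exp(logvar)) and N(0, I).
    recon = F.binary_cross_entropy(recon_x, x, reduction='sum')
    kl = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
    return recon + kl, recon, kl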
Example #5
def play_one_game(board: Board, model: Model, logger: Logger):
    color = CROSS
    while True:
        policy = model.infer(board.data(color))
        pos = choice_move_with_epsilon(policy, board)
        if pos is False:
            logger.info("*** Move Choice Error ***")
            exit()
        board.put(pos, color)

        logger.debug("\n" + board.string_board())

        if board.is_finished:
            break

        color = turn_color(color)

    if board.result == CROSS:
        result = "X Win"
    elif board.result == CYCLE:
        result = "O win"
    else:
        result = "DRAW"
    logger.info("result is {}".format(result))
Example #6
                                         descs_to_draw=[desc_name],
                                         logger=file_logger,
                                         tensor_logger = logger)
            else:
                w1bs.draw_and_save_plots(DESC_DIR=DESCS_DIR, OUT_DIR=OUT_DIR,
                                         methods=["SNN_ratio"],
                                         descs_to_draw=[desc_name])
        #randomize train loader batches

        train_loader, test_loaders2 = create_loaders(load_random_triplets=triplet_flag)
        '''
        print('i am here')

if __name__ == '__main__':
    LOG_DIR = args.log_dir
    if not os.path.isdir(LOG_DIR):
        os.makedirs(LOG_DIR)
    LOG_DIR = os.path.join(args.log_dir, suffix)
    DESCS_DIR = os.path.join(LOG_DIR, 'temp_descs')
    if TEST_ON_W1BS:
        if not os.path.isdir(DESCS_DIR):
            os.makedirs(DESCS_DIR)
    logger, file_logger = None, None
    model = HardNet()
    if args.enable_logging:
        from Loggers import Logger, FileLogger
        logger = Logger(LOG_DIR)
        #file_logger = FileLogger(./log/+suffix)
    train_loader, test_loaders = create_loaders(load_random_triplets=triplet_flag)
    main(train_loader, test_loaders, model, logger, file_logger)
Example #7
correct_pred = tf.equal(tf.argmax(prediction, 1),
                        tf.argmax(label_place_holder, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
print('done constructing model')

restorer = tf.train.Saver(variables_to_restore)

sess.run(tf.global_variables_initializer())
if args.base_network == 'InceptionV1':
    restorer.restore(sess, args.base_network_file)
print('done initializing')

# ensure the dataset-specific log directory exists
if not os.path.isdir(args.log_dir + args.dataset):
    os.makedirs(args.log_dir + args.dataset)
logger = Logger(args.log_dir + args.dataset + '/' + suffix)

if not os.path.isdir('{}/{}/{}/'.format(args.model_dir, args.dataset, suffix)):
    os.makedirs('{}/{}/{}/'.format(args.model_dir, args.dataset, suffix))

saver = tf.train.Saver()

# run one warm-up epoch so the batch-norm statistics settle
ptr = 0
random_ind = np.random.permutation(num_training_sample)
for step in tqdm(range(int(num_training_sample / training_batch_size) + 1)):
    real_size = min(training_batch_size, num_training_sample - ptr)
    if real_size <= 0:
        break
    img = np.zeros((real_size, default_image_size,
                    default_image_size, 3), dtype=np.float32)
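The ptr/real_size bookkeeping above (the snippet cuts off before ptr is advanced) walks one shuffled epoch in fixed-size chunks with a short final batch. The same pattern as a small self-contained generator, for reference:

import numpy as np

def minibatch_indices(num_samples, batch_size):
    # Yield index arrays covering one shuffled epoch; the last batch
    # may be smaller, mirroring the real_size computation above.
    order = np.random.permutation(num_samples)
    for start in range(0, num_samples, batch_size):
        yield order[start:start + batch_size]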
Example #8
        for x in dic['pred'][0][:, :, :, :, :]
    ]))
]

cp_models_path = os.path.join(
    os.path.abspath(os.path.dirname(__file__)), "models",
    "weights-improvement-{epoch:02d}-{val_loss:.2f}.hdf5")

model = create_model(train_config)
tf.set_random_seed(0)
sess = tf.keras.backend.get_session()
with sess.as_default():
    tb = TensorBoard(log_dir='./logs')
    logging.info("Start Training")
    model.fit_generator(generator=train_generator(),
                        validation_data=[validation_data, validation_data_y],
                        epochs=train_config['epochs'],
                        steps_per_epoch=int(training_elements /
                                            train_config['batchsize']),
                        callbacks=[
                            tb,
                            Logger(tb_logs=tb_writers),
                            ModelCheckpoint(cp_models_path,
                                            save_best_only=True)
                        ])
    model.save(train_config['model_output'])

data_queue.close()
for p in processes:
    p.terminate()
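The Logger passed to fit_generator here is a project-specific Keras callback, not a built-in. A minimal callback with the same shape might look like this; the tb_logs argument and what it records are assumptions:

import logging
from tensorflow.keras.callbacks import Callback

class Logger(Callback):
    # Hypothetical sketch of the custom logging callback.
    def __init__(self, tb_logs=None):
        super().__init__()
        self.tb_logs = tb_logs  # assumed: extra TensorBoard writers

    def on_epoch_end(self, epoch, logs=None):
        # Keras fills logs with the epoch's metrics (loss, val_loss, ...)
        logging.info("epoch %d: %s", epoch, logs or {})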
Example #9
    import os ; os.environ['KERAS_BACKEND']='theano'
else:
    import os ; os.environ['KERAS_BACKEND']='tensorflow'

logging.getLogger('keras').setLevel(logging.INFO)

if args.mode == 'nlIB':
    suffix = '{}_encoder{}_beta{:1.1f}'.format(args.mode,args.encoder,args.beta)
else:
    suffix = '{}_encoder{}'.format(args.mode,args.encoder)

LOG_DIR = args.log_dir
if not os.path.isdir(LOG_DIR):
    os.makedirs(LOG_DIR)
LOG_DIR = args.log_dir + suffix
logger = Logger(LOG_DIR)

import reporting
import buildmodel
import keras.callbacks

arg_dict = vars(args)

VALIDATE_ON_TEST = True
#arg_dict['noise_logvar_trainable'] = True

trn, tst = buildmodel.get_mnist(args.trainN, args.testN)
# ***************************

arg_dict['INPUT_DIM'] = trn.X.shape[1]
print('# ARGS:', arg_dict)
Example #10
    suffix = suffix + '_ln'
if args.label_smoothing:
    suffix = suffix + '_ls'
if args.norm_weights:
    suffix = suffix + '_nw'
if args.bn:
    suffix = suffix + '_bn'
    args.alpha = args.alpha / np.sqrt(args.embedding_dim)
if args.heat_up:
    suffix = suffix + '_heat'

LOG_DIR = args.log_dir
if not os.path.isdir(LOG_DIR):
    os.makedirs(LOG_DIR)
LOG_DIR = args.log_dir + suffix
logger = Logger(LOG_DIR)

# Parameters
learning_rate = args.learning_rate
training_epochs = args.nb_epoch
batch_size = 20
display_step = 10

# Network Parameters
n_input = 28  # MNIST data input (img shape: 28*28)
n_classes = 5  # MNIST total classes (0-4 digits)

config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
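Setting config.gpu_options.allow_growth = True stops TF1 from reserving all GPU memory up front. For reference only, the TF2 equivalent of this setting is:

import tensorflow as tf

# Enable on-demand GPU memory growth (TF2 replacement for the
# tf.ConfigProto allow_growth flag used above).
for gpu in tf.config.list_physical_devices('GPU'):
    tf.config.experimental.set_memory_growth(gpu, True)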
Example #11
correct_pred = tf.equal(tf.argmax(prediction, 1),
                        tf.argmax(label_place_holder, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
print('done constructing model')

print('initialize variables')
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
sess.run(tf.global_variables_initializer())
print('done initializing')

if not os.path.isdir(args.log_dir):
    os.makedirs(args.log_dir)
logger = Logger(args.log_dir + suffix)

global_step = 0
log_step = 0
for epoch in range(args.nb_epoch):
    ptr = 0
    for step in tqdm(range(int(num_training_sample / training_batch_size) +
                           1)):
        real_size = min(training_batch_size, num_training_sample - ptr)
        if real_size <= 0:
            break

        if args.data_augment:
            img = np.zeros((real_size, default_image_size,
                            default_image_size, 3), dtype=np.float32)
            flip_flag = np.random.randint(2, size=real_size)
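The snippet ends just after drawing flip_flag; a typical continuation applies a per-sample random horizontal flip while filling img. A sketch of that step (the image-loading code it would sit inside is not shown):

# For each sample in the batch, mirror the image left-right when its
# flip flag is set; img has shape (real_size, H, W, 3).
for k in range(real_size):
    if flip_flag[k]:
        img[k] = img[k, :, ::-1, :]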
Example #12
    np.random.seed(args.seed)

    if not os.path.isdir(args.log_dir):
        os.makedirs(args.log_dir)

    log_dir = os.path.join(args.log_dir, suffix)
    DESCS_DIR = os.path.join(log_dir, 'temp_descs')
    if TEST_ON_W1BS:
        if not os.path.isdir(DESCS_DIR):
            os.makedirs(DESCS_DIR)
    logger, file_logger = None, None
    if args.use_srn:
        from models.HardNet_SRN import HardNet_SRN
        model = HardNet_SRN(args.use_smooth)
    else:
        from models.HardNet import HardNet
        model = HardNet(args.use_arf, args.orientation)

    if args.enable_logging:
        from Loggers import Logger, FileLogger
        logger = Logger(log_dir)  # clears existing files in log_dir and starts logging
        file_logger = FileLogger(os.path.join(args.log_dir, suffix))

    # with load_random_triplets=False each item is an (anchor, positive) pair;
    # negative patches are generated on the fly rather than stored
    train_loader = create_loaders(load_random_triplets=triplet_flag)
    test_loaders = create_loaders(load_random_triplets=triplet_flag,
                                  train=False)
    main(train_loader, test_loaders, model, logger, file_logger)
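Several of these examples import Logger from a project-local Loggers module that wraps TensorBoard-style scalar logging. As a rough stand-in (not the repos' actual implementation), a minimal version can be built on torch.utils.tensorboard:

from torch.utils.tensorboard import SummaryWriter

class Logger:
    # Rough stand-in for the project-local Loggers.Logger: write
    # scalar summaries under log_dir for TensorBoard to read.
    def __init__(self, log_dir):
        self.writer = SummaryWriter(log_dir)

    def scalar_summary(self, tag, value, step):
        self.writer.add_scalar(tag, value, step)
        self.writer.flush()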