Ejemplo n.º 1
0
def source_only(encoder, classifier, discriminator, source_train_loader,
                target_train_loader, save_name):
    """Train encoder + classifier on labeled source data only (no adaptation).

    Args:
        encoder: feature extractor network (trained here).
        classifier: label-predictor head (trained here).
        discriminator: domain classifier — unused for training here, passed
            through to test.tester/save_model for interface parity with dann().
        source_train_loader: DataLoader of labeled source images (1-channel
            MNIST; replicated to 3 channels below).
        target_train_loader: target-domain DataLoader; only its length feeds
            the progress value `p` for the LR scheduler.
        save_name: suffix for the saved model / visualization files.
    """
    print("Source-only training")

    # Build the optimizer ONCE so SGD momentum buffers persist across steps.
    # The original re-created it every batch, silently resetting momentum;
    # dann() below already constructs its optimizer outside the batch loop.
    optimizer = optim.SGD(list(encoder.parameters()) +
                          list(classifier.parameters()),
                          lr=0.01,
                          momentum=0.9)

    for epoch in range(params.epochs):
        print('Epoch : {}'.format(epoch))

        encoder = encoder.train()
        classifier = classifier.train()
        discriminator = discriminator.train()

        classifier_criterion = nn.CrossEntropyLoss().cuda()

        # Progress p in [0, 1) across the whole run, used by the scheduler.
        start_steps = epoch * len(source_train_loader)
        total_steps = params.epochs * len(target_train_loader)

        for batch_idx, (source_data, target_data) in enumerate(
                zip(source_train_loader, target_train_loader)):
            source_image, source_label = source_data
            p = float(batch_idx + start_steps) / total_steps

            # MNIST is single-channel; replicate to 3 channels for the encoder.
            source_image = torch.cat(
                (source_image, source_image, source_image),
                1)  # MNIST convert to 3 channel
            source_image, source_label = source_image.cuda(
            ), source_label.cuda()  # 32

            # Anneal the learning rate as a function of progress p.
            optimizer = utils.optimizer_scheduler(optimizer=optimizer, p=p)
            optimizer.zero_grad()

            source_feature = encoder(source_image)

            # Classification loss on source labels only.
            class_pred = classifier(source_feature)
            class_loss = classifier_criterion(class_pred, source_label)

            class_loss.backward()
            optimizer.step()
            if (batch_idx + 1) % 50 == 0:
                print('[{}/{} ({:.0f}%)]\tClass Loss: {:.6f}'.format(
                    batch_idx * len(source_image),
                    len(source_train_loader.dataset),
                    100. * batch_idx / len(source_train_loader),
                    class_loss.item()))

        # Periodic evaluation; source_test_loader / target_test_loader are
        # module-level globals — TODO confirm they exist at call time.
        if (epoch + 1) % 10 == 0:
            test.tester(encoder,
                        classifier,
                        discriminator,
                        source_test_loader,
                        target_test_loader,
                        training_mode='source_only')
    save_model(encoder, classifier, discriminator, 'source', save_name)
    visualize(encoder, 'source', save_name)
Ejemplo n.º 2
0
def main():
    """Parse command-line options and dispatch to trainer or tester."""
    parser = argparse.ArgumentParser()

    parser.add_argument('--logs', type=str, default=None, help='logs by tensorboardX')
    parser.add_argument('--local_test', type=str2bool, default=False, help='local test verbose')
    parser.add_argument('--model_name', type=str, default="dcgan", help='model name for saving')
    parser.add_argument('--test', type=str2bool, default=False, help='call tester.py')
    parser.add_argument('--use_visdom', type=str2bool, default=False, help='visualization by visdom')
    args = parser.parse_args()

    parameters.print_params()

    # Idiomatic truthiness check (original compared with `== False`).
    if not args.test:
        trainer(args)
    else:
        tester(args)
Ejemplo n.º 3
0
def main():
    """Entry point: train a source-only model on MNIST→MNIST-M and test it.

    Requires CUDA; prints a message and exits otherwise.
    """
    source_train_loader = mnist.mnist_train_loader
    target_train_loader = mnistm.mnistm_train_loader

    # Guard clause: everything below needs a GPU.
    if not torch.cuda.is_available():
        print("There is no GPU -_-!")
        return

    get_free_gpu()
    print('Running GPU : {}'.format(torch.cuda.current_device()))

    encoder = model.Extractor().cuda()
    classifier = model.Classifier().cuda()
    discriminator = model.Discriminator().cuda()

    # NOTE(review): save_name is neither a local nor a parameter here —
    # presumably a module-level global; confirm it is defined before running.
    train.source_only(encoder, classifier, discriminator,
                      source_train_loader, target_train_loader, save_name)
    test.tester(encoder, classifier, discriminator, source_train_loader,
                target_train_loader, 'source')
Ejemplo n.º 4
0
# Demo script: runs test.tester() and prints a module-level variable before
# and after, showing the call leaves it untouched.
import test
from test import tester

# Module-level assignment; the original's `global yepp` statement was a
# no-op at module scope and has been removed.
yepp = 'ro'

print(yepp)
test.tester()
print(yepp)
Ejemplo n.º 5
0
# Schedule constants (units presumably iterations/epochs — TODO confirm
# against the loop body, which is not fully shown here).
up_alp_interval = 10
save_ep_interval = 4
disp_interval = 2
nan_error = False  # flag raised when training hits a NaN — handler not shown

# NOTE(review): this `try` block is truncated in this chunk — its matching
# except/finally is not visible.
try:
    # phase train, 8 phases
    # Progressive-training schedule: each phase sets resolution, batch size,
    # learning rate, image budget, and whether alpha gets updated.
    for phs in range(start_phs, phs_num):
        cur_res, batch_size, l_rate, k_imgs, update_alp = train_phases[phs]
        lr.assign(l_rate)  # lr is a tf.Variable shared with the optimizers

        # g_batch_size = batch_size * strategy.num_replicas_in_sync
        # losses = loss.losses2(g_batch_size, tf.distribute.has_strategy())
        # Distribution-strategy variants above are disabled; single-replica.
        g_batch_size = batch_size
        losses = loss.losses2()
        tester = test.tester(SAMP_DIR, batch_size)

        # data: rebuild the dataset at this phase's resolution.
        print("======= Make data: %dx%d =======" % (cur_res, cur_res))
        tr_data = data.img_ds(data_dir=DATA_DIR,
                              img_resize=cur_res,
                              batch_size=g_batch_size)
        # tr_ite  = iter(strategy.experimental_distribute_dataset(tr_data.ds))
        tr_ite = iter(tr_data.ds)

        # create tf graph: compile the train steps once per phase.
        d_graph = tf.function(d_step)
        g_graph = tf.function(g_step)

        # training loop (continues beyond this chunk)
        print("======= Create training loop =======")
Ejemplo n.º 6
0
def dann(encoder, classifier, discriminator, source_train_loader,
         target_train_loader, save_name):
    """Domain-Adversarial Neural Network (DANN) training loop.

    Jointly minimizes a classification loss and a domain loss: the
    discriminator learns to tell source (label 0) from target (label 1)
    features, while `alpha` — passed to the discriminator's gradient-reversal
    layer — makes the encoder learn domain-invariant features.

    Args:
        encoder: shared feature extractor (trained).
        classifier: label predictor (trained).
        discriminator: domain classifier taking (features, alpha) (trained).
        source_train_loader: labeled source-domain DataLoader.
        target_train_loader: labeled target-domain DataLoader.
        save_name: suffix for the saved model / visualization files.

    NOTE(review): the classification loss uses labels from BOTH domains
    (the commented-out source-only variant is standard DANN) — confirm
    target labels are really meant to supervise the classifier.
    """
    print("DANN training")
    for epoch in range(params.epochs):
        print('Epoch : {}'.format(epoch))

        encoder = encoder.train()
        classifier = classifier.train()
        discriminator = discriminator.train()

        classifier_criterion = nn.CrossEntropyLoss().cuda()
        discriminator_criterion = nn.CrossEntropyLoss().cuda()

        # Progress p in [0, 1) for the LR scheduler and alpha ramp.
        start_steps = epoch * len(source_train_loader)
        total_steps = params.epochs * len(target_train_loader)

        optimizer = optim.SGD(list(encoder.parameters()) +
                              list(classifier.parameters()) +
                              list(discriminator.parameters()),
                              lr=0.01,
                              momentum=0.9)

        for batch_idx, (source_data, target_data) in enumerate(
                zip(source_train_loader, target_train_loader)):

            source_image, source_label = source_data
            target_image, target_label = target_data

            # Standard DANN schedule: alpha ramps 0 -> 1 as 2/(1+e^{-10p})-1.
            p = float(batch_idx + start_steps) / total_steps
            alpha = 2. / (1. + np.exp(-10 * p)) - 1

            source_image, source_label = source_image.cuda(
            ), source_label.cuda()
            target_image, target_label = target_image.cuda(
            ), target_label.cuda()

            # Stack source and target along the batch axis so one encoder
            # pass serves both the classifier and the discriminator.
            combined_image = torch.cat((source_image, target_image), 0)
            combined_label = torch.cat((source_label, target_label), 0)

            optimizer = utils.optimizer_scheduler(optimizer=optimizer, p=p)
            optimizer.zero_grad()

            combined_feature = encoder(combined_image)
            # (Removed: a second encoder pass over source_image whose output
            # was never used — it only wasted a full forward pass per batch.)

            # 1. Classification loss over both domains.
            class_pred = classifier(combined_feature)
            class_loss = classifier_criterion(class_pred, combined_label)

            # 2. Domain loss: source batches labeled 0, target labeled 1.
            domain_pred = discriminator(combined_feature, alpha)

            domain_source_labels = torch.zeros(source_label.shape[0]).type(
                torch.LongTensor)
            domain_target_labels = torch.ones(target_label.shape[0]).type(
                torch.LongTensor)
            domain_combined_label = torch.cat(
                (domain_source_labels, domain_target_labels), 0).cuda()
            domain_loss = discriminator_criterion(domain_pred,
                                                  domain_combined_label)

            total_loss = class_loss + domain_loss
            total_loss.backward()
            optimizer.step()

            if (batch_idx + 1) % 50 == 0:
                print(
                    '[{}/{} ({:.0f}%)]\tLoss: {:.6f}\tClass Loss: {:.6f}\tDomain Loss: {:.6f}'
                    .format(batch_idx * len(target_image),
                            len(target_train_loader.dataset),
                            100. * batch_idx / len(target_train_loader),
                            total_loss.item(), class_loss.item(),
                            domain_loss.item()))

        # Periodic evaluation; *_test_loader are module-level globals —
        # TODO confirm they exist at call time.
        if (epoch + 1) % 10 == 0:
            test.tester(encoder,
                        classifier,
                        discriminator,
                        source_test_loader,
                        target_test_loader,
                        training_mode='dann')

        tensorboard(encoder, classifier, target_test_loader,
                    target_train_loader, epoch)

    # NOTE(review): saved with mode 'source', identical to source_only() —
    # likely intended to be 'dann'; left unchanged pending confirmation.
    save_model(encoder, classifier, discriminator, 'source', save_name)
    visualize(encoder, 'source', save_name)
Ejemplo n.º 7
0
    # (Fragment: the enclosing function definition is above this chunk.)
    # Forward pass: encode x, run the student net conditioned on attributes
    # a, then decode with a zeroed attribute shift (a - a); the
    # discriminator returns a realness score d and attribute logits att.
    z = Enc(x)
    z_stu = Stu(z, a)
    x_fake = Gen(z_stu, a - a)
    d, att = Dis(x)

    # Shared LR variable (assigned elsewhere) drives both Adam optimizers.
    lr = tf.Variable(initial_value=0., trainable=False)
    g_opt = tf.optimizers.Adam(lr, beta_1=0., beta_2=0.99)
    d_opt = tf.optimizers.Adam(lr, beta_1=0., beta_2=0.99)
    # Persisted int64 counters — presumably [epoch, iteration]; TODO confirm.
    params = tf.Variable(initial_value=[5, 0], trainable=False, dtype=tf.int64)

    # Fresh sample directory for test outputs (wiped if it already exists).
    sample_dir = './output/%s/%s' % (experiment_name,
                                     experiment_name + "_test")
    if os.path.exists(sample_dir):
        shutil.rmtree(sample_dir)
    os.makedirs(sample_dir, exist_ok=True)
    tester = test.tester(sample_dir, batch_size)

    # Checkpointing: track optimizers, counters and all four networks, keep
    # the 3 most recent snapshots, and restore the latest if present.
    checkpoint_dir = './output/%s/trained_model' % experiment_name
    checkpoint = tf.train.Checkpoint(params=params,
                                     d_opt=d_opt,
                                     g_opt=g_opt,
                                     Gen=Gen,
                                     Dis=Dis,
                                     Enc=Enc,
                                     Stu=Stu)

    manager = tf.train.CheckpointManager(checkpoint,
                                         checkpoint_dir,
                                         max_to_keep=3)
    checkpoint.restore(manager.latest_checkpoint)
    # NOTE(review): the snippet is truncated below — the body of this `if`
    # is not visible in this chunk.
    if manager.latest_checkpoint:
Ejemplo n.º 8
0
def trainer(exp_name,
            train_data_loader,
            train_tile_borders,
            cfg,
            val_data_loader=None,
            val_tile_borders=None,
            DEBUG=False,
            use_tensorboard=True):
    """Train a segmentation network with gradient accumulation.

    Loads model/optimizer/criterion/epoch via exp.load_exp, trains for
    cfg['num_epochs'] epochs, optionally validates via test.tester, logs to
    TensorBoard (Crayon), and snapshots checkpoints periodically.

    NOTE(review): uses legacy PyTorch idioms (`Variable`, `loss.data[0]`) —
    this targets pre-0.4 PyTorch; `loss.data[0]` raises on modern versions
    (the modern form is `loss.item()`). Left as-is to match the codebase.

    Args:
        exp_name: experiment identifier understood by the exp helpers.
        train_data_loader: yields (img_name, images, targets) batches.
        train_tile_borders: border width trimmed from tiles before the loss.
        cfg: dict with 'log_iter_interval', 'snapshot_epoch_interval',
            'num_epochs', and cfg['train']['accumulated_batch_size'].
        val_data_loader: optional validation DataLoader.
        val_tile_borders: border width for validation tiles.
        DEBUG: when True, visualize batches whose accuracy drops below 0.98.
        use_tensorboard: enable Crayon/TensorBoard logging if available.
    """
    net, optimizer, criterion, start_epoch = exp.load_exp(exp_name)

    if torch.cuda.is_available():
        net.cuda()
        criterion = criterion.cuda()
    net.train()  # Change model to 'train' mode

    # set up TensorBoard (setup_crayon may flip use_tensorboard off on failure)
    experiment, use_tensorboard = exp.setup_crayon(use_tensorboard,
                                                   CrayonClient, exp_name)

    # Training settings pulled from the config dict.
    log_iter_interval = cfg['log_iter_interval']
    snapshot_epoch_interval = cfg['snapshot_epoch_interval']
    num_epochs = cfg['num_epochs']
    accumulated_batch_size = cfg['train']['accumulated_batch_size']

    # Train the Model
    for epoch in range(start_epoch, num_epochs + 1):
        epoch_start = time.time()

        # initialize epoch stats
        epoch_train_loss = 0
        epoch_train_accuracy = 0
        accumulated_batch_loss = 0

        print('Epoch [%d/%d] starts' % (epoch, num_epochs))

        for i, (img_name, images, targets) in enumerate(train_data_loader):
            iter_start = time.time()

            # convert to FloatTensor
            images = images.float()
            targets = targets.float()

            # Legacy PyTorch: tensors must be wrapped in Variable for autograd.
            images = Variable(images)
            targets = Variable(targets)

            if torch.cuda.is_available():
                images = images.cuda()
                targets = targets.cuda()

            outputs = net(images)

            # remove tile borders so edge artifacts don't influence the loss
            images = tile.remove_tile_borders(images, train_tile_borders)
            outputs = tile.remove_tile_borders(outputs, train_tile_borders)
            targets = tile.remove_tile_borders(targets, train_tile_borders)

            loss = criterion(outputs, targets)

            # generate prediction: hard mask at a 0.5 threshold
            masks = (outputs > 0.5).float()

            accuracy = evaluation.dice_loss(masks, targets)
            epoch_train_accuracy += accuracy

            # Backward pass — gradients ACCUMULATE until optimizer.step()
            # below; the loss contribution is scaled by the virtual batch.
            loss.backward()
            accumulated_batch_loss += (loss.data[0] / accumulated_batch_size)

            # Update epoch stats
            epoch_train_loss += loss.data[0]

            # Log Training Progress
            if (i + 1) % log_iter_interval == 0:
                print(
                    'Epoch [%d/%d] Iter [%d/%d] Loss: %.3f Accumd Loss:%.4f Accuracy: %.5f'
                    % (epoch, num_epochs, i + 1, len(train_data_loader),
                       loss.data[0], accumulated_batch_loss, accuracy))

            # In DEBUG mode, visualize any batch with sub-0.98 accuracy.
            if DEBUG and accuracy < 0.98:
                print('Epoch {}, Iter {}, {}: Loss {:.5f}, Accuracy: {:.6f}'.
                      format(epoch, i, img_name, loss.data[0], accuracy))

                # convert to numpy array (first sample of the batch)
                image = images.data[0].cpu().numpy()
                mask = masks.data[0].cpu().numpy()
                target = targets.data[0].cpu().numpy()

                viz.visualize(image, mask, target)

            # Step once every accumulated_batch_size iterations.
            # NOTE(review): a trailing partial accumulation at epoch end is
            # never stepped and its gradients leak into the next epoch.
            if (i + 1) % accumulated_batch_size == 0:
                optimizer.step()

                # reset gradients and the accumulated-loss display value
                optimizer.zero_grad()
                accumulated_batch_loss = 0

            iter_end = time.time()
            # Log Training Progress
            if (i + 1) % log_iter_interval == 0:
                print('Time Spent: {:.2f} sec'.format(iter_end - iter_start))

        # inner for loop ends

        # Per-epoch averages over the number of batches.
        epoch_train_loss /= len(train_data_loader)
        epoch_train_accuracy /= len(train_data_loader)

        # Validate
        if val_data_loader is not None:
            epoch_val_loss, epoch_val_accuracy = test.tester(exp_name,
                                                             val_data_loader,
                                                             val_tile_borders,
                                                             net,
                                                             criterion,
                                                             is_val=True)

        if use_tensorboard:
            experiment.add_scalar_value('train loss',
                                        epoch_train_loss,
                                        step=epoch)
            experiment.add_scalar_value('train accuracy',
                                        epoch_train_accuracy,
                                        step=epoch)

            if val_data_loader is not None:
                experiment.add_scalar_value('val loss',
                                            epoch_val_loss,
                                            step=epoch)
                experiment.add_scalar_value('val accuracy',
                                            epoch_val_accuracy,
                                            step=epoch)

            # experiment.add_scalar_value('learning_rate', lr, step=epoch)

        # Save the trained model every snapshot_epoch_interval epochs.
        if epoch % snapshot_epoch_interval == 0:
            exp.save_checkpoint(exp_name, epoch, net.state_dict(),
                                optimizer.state_dict())

        epoch_end = time.time()
        print('Epoch [%d/%d] Loss: %.4f Accuracy: %.5f Time Spent: %.2f sec' %
              (epoch, num_epochs, epoch_train_loss, epoch_train_accuracy,
               epoch_end - epoch_start))

    # outer for loop ends

    return
Ejemplo n.º 9
0
# Webcam capture loop: stream frames; SPACE saves the current frame and runs
# tester(); ESC exits. `cam` and `img_counter` are defined above this chunk.
while True:
    # Read the next frame from the webcam.
    ret, frame = cam.read()

    # Check the read result BEFORE displaying: on a failed grab `frame` is
    # None and cv2.imshow would raise. (The original checked ret only after
    # imshow/waitKey, so a dead camera crashed instead of exiting cleanly.)
    if not ret:
        break

    cv2.imshow("Current_Stream", frame)

    # Wait for keystroke (1 ms poll)
    k = cv2.waitKey(1)

    # Actions for when SPACE (ASCII 32) is pressed
    if k % 256 == 32:
        # Concatenate image path/filename
        img_name = "webcam_image/opencv_frame.jpg"
        # Save captured frame
        cv2.imwrite(img_name, frame)
        print("The file {} was created!".format(img_name))
        # Increment counter to keep track of captured frames
        img_counter += 1
        # calling the tester function from the test.py script
        tester()

    # Actions for when ESC (ASCII 27) is pressed
    elif k % 256 == 27:
        print("ESC key entered, now closing application!")
        break

cam.release()
cv2.destroyAllWindows()
Ejemplo n.º 10
0
Archivo: main.py Proyecto: yk287/NLP
# Parse command-line options for this run.
opts = options.parse()

# data loader: build train/test datasets from the raw data plus the
# letter/label index maps.
data_loader = data.dataloader(opts)
train_loader = util.create_dataset(data_loader.train_data,
                                   data_loader.letteridx, data_loader.labelidx,
                                   opts)
test_loader = util.create_dataset(data_loader.test_data, data_loader.letteridx,
                                  data_loader.labelidx, opts)

# Mid-file imports kept as written; the bare '''...''' strings below are
# no-op expression statements the author used as section markers.
from network import RNN
from train import trainer
from test import tester
'''RNN model'''
# NOTE(review): this rebinds the class name RNN to an instance — further
# instantiation of the class is impossible after this line.
RNN = RNN(opts, data_loader.letteridx).to(device)

if opts.print_model:
    print(RNN)
'''Optimizers'''
import torch.optim as optim

RNN_optim = optim.Adam(RNN.parameters(),
                       lr=opts.lr,
                       betas=(opts.beta1, opts.beta2))
'''Criterion'''
# NLLLoss pairs with a log-softmax output layer — presumably what the
# network emits; confirm in network.py.
criterion = nn.NLLLoss()
'''run training'''
trainer(opts, RNN, RNN_optim, criterion, train_loader)
'''test'''
tester(opts, RNN, test_loader)
Ejemplo n.º 11
0
def main(args):
    """Train (args.train == 0) or predict (args.train == 1) on the
    retinopathy dataset.

    Args:
        args: argparse namespace providing train_dir, train_csv, test_dir,
            test_csv, train_valid_ratio, batch_size, epochs, train,
            pretrain_weight and verbose.
    """
    train_dir = args.train_dir
    train_csv = args.train_csv
    test_dir = args.test_dir
    test_csv = args.test_csv

    ratio = args.train_valid_ratio
    batch_size = args.batch_size
    epochs = args.epochs

    train_flag = args.train
    pretrain_weight = args.pretrain_weight
    verbose = args.verbose

    if train_flag == 0:
        if verbose == 2:
            print("Reading Training Data...")

        # Distinct DataFrame names so the CSV *path* isn't shadowed.
        train_df = pd.read_csv(train_csv)
        train_df, valid_df = train_valid_split(train_df, ratio)

        train = RetinopathyDataset(train_df, train_dir)
        valid = RetinopathyDataset(valid_df, train_dir)

        if verbose == 2:
            print("Creating DataLoader...")

        train_dataloader = DataLoader(train,
                                      batch_size=batch_size,
                                      shuffle=True,
                                      num_workers=4)
        # Validation runs without gradients: larger batch, no shuffling.
        valid_dataloader = DataLoader(valid,
                                      batch_size=batch_size * 4,
                                      shuffle=False,
                                      num_workers=4)

        if verbose == 2:
            print("Creating EfficientNet Model...")

        # NOTE(review): training always starts from this hard-coded
        # aptos2018 weight file, ignoring args.pretrain_weight — confirm
        # that is intended.
        model = EfficientNetFinetune(
            level="efficientnet-b5",
            finetune=False,
            pretrain_weight="./weights/pretrained/aptos2018.pth")

        trainer = Trainer(model,
                          train_dataloader,
                          valid_dataloader,
                          epochs,
                          early_stop="QK",
                          verbose=verbose)

        if verbose == 2:
            print("Start Training...")  # typo "Strat" fixed
        trainer.train()

    if train_flag == 1:
        if verbose == 2:
            print("Start Predicting...")  # typo "Strat" fixed

        test_df = pd.read_csv(test_csv)
        test = RetinopathyDataset(test_df, test_dir, test=True)
        test_dataloader = DataLoader(test,
                                     batch_size=batch_size * 4,
                                     shuffle=False,
                                     num_workers=4)
        model = EfficientNetFinetune(level="efficientnet-b5",
                                     finetune=False,
                                     test=True,
                                     pretrain_weight=pretrain_weight)
        tester(model, test_dataloader, verbose)
Ejemplo n.º 12
0
from methods import extract_hog, get_svm_detector, train_svm, hog_detect
from dataset_class import Dataset


def logger_init():
    """Create and return the "PedestranDetect" logger.

    Configures an INFO-level logger writing to stdout with a
    timestamp/level/message format.

    Returns:
        logging.Logger: the configured logger.

    Note: calling this more than once attaches duplicate handlers, since
    handlers accumulate on the named logger.
    """
    # Get (or create) the named logger instance; an empty name would
    # return the root logger instead.
    logger = logging.getLogger("PedestranDetect")

    formatter = logging.Formatter('%(asctime)s %(levelname)-8s: %(message)s')
    console_handler = logging.StreamHandler(sys.stdout)
    # setFormatter is the documented API; the original assigned the
    # .formatter attribute directly, which works but bypasses it.
    console_handler.setFormatter(formatter)
    logger.addHandler(console_handler)
    logger.setLevel(logging.INFO)

    return logger


if __name__ == '__main__':
    # End-to-end HOG + SVM pedestrian-detection pipeline, finishing with a
    # detection pass over a video file.
    file_path = 'data/test.mp4'
    logger = logger_init()
    dataset = Dataset()
    # pos/neg are the positive/negative training samples; test is held out.
    pos, neg, test = dataset.load_data_set(logger=logger)
    samples, labels = dataset.load_train_samples(pos, neg)
    # Extract HOG feature vectors for every training sample.
    train = extract_hog(samples, logger=logger)
    logger.info('Size of feature vectors of samples: {}'.format(train.shape))
    logger.info('Size of labels of samples: {}'.format(labels.shape))
    svm_detector = train_svm(train, labels, logger=logger)
    hog_detect(test, svm_detector, logger)
    # NOTE(review): tester is not imported in the visible code — presumably
    # imported above this chunk; confirm before running.
    tester(file_path)
Ejemplo n.º 13
0
 def schedule_tester(self, cycle=TESTER_CYCLE):
     """Run the proxy tester in an endless loop.

     Args:
         cycle: seconds to sleep between tester runs (defaults to the
             module-level TESTER_CYCLE constant).
     """
     self.tester = tester()
     while True:
         # Printed message is Chinese for "tester starts running".
         print("  测试器开始运行  ")
         self.tester.run()
         time.sleep(cycle)