Example No. 1
# SSD loss function (MultiBox loss)
criterion = ssd_loss.MultiBoxLoss(CONFIG, CONFIG['num_classes'], 0.5, True, 0,
                                  True, 3, 0.5, False, CONFIG["USE_GPU"])

train_loader = t.utils.data.DataLoader(dataset,
                                       CONFIG["BATCH_SIZE"],
                                       shuffle=True,
                                       collate_fn=ssd_utils.detection_collate)

if FROM_TRAIN_ITER > 1:
    net.load_state_dict(t.load("outputs/SSD_%03d.pth" % (FROM_TRAIN_ITER - 1)))

index = 0
step_index = 0
# predict = j_m_ssd.SSDPredict(CONFIG["CLASSES"])
bar = j_bar.ProgressBar(CONFIG["EPOCH"], len(train_loader),
                        "Loss : %.3f; Total Loss : %.3f")

predict = ssd_predict.SSDPredict(CONFIG["CLASSES"])
net.train()
log = logger.Logger(CONFIG["LOG_DIR"])
for epoch in range(FROM_TRAIN_ITER, CONFIG["EPOCH"] + 1):
    total_loss = 0.
    t.cuda.empty_cache()
    for i, (images, targets) in enumerate(train_loader):
        index += 1  # global step counter
        if epoch >= 30:
            LEARNING_RATE = 0.0005
        if epoch >= 50:
            LEARNING_RATE = 0.00025
        if epoch >= 80:
            LEARNING_RATE = 0.00001
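
The excerpt cuts off before the scheduled rate reaches the optimizer. A minimal sketch of the usual continuation, assuming the `optimizer` created earlier in the script (not shown in this excerpt):

# Hypothetical continuation: push the scheduled rate into every parameter group.
for param_group in optimizer.param_groups:
    param_group['lr'] = LEARNING_RATE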
Example No. 2
# Assumed head of this truncated helper: a Darknet-style step schedule,
# where learning_rate, steps, scales, and batch_size are module-level settings.
def adjust_learning_rate(optimizer, batch):
    lr = learning_rate
    for i in range(len(steps)):
        scale = scales[i] if i < len(scales) else 1
        if batch >= steps[i]:
            lr = lr * scale
            if batch == steps[i]:
                break
        else:
            break
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr / batch_size
    return lr


processed_batches = 0
if FROM_TRAIN_ITER > 1:
    model.load_state_dict(
        torch.load("outputs/YOLOV3_%03d.pth" % (FROM_TRAIN_ITER - 1)))
log = logger.Logger("logs/")
predict = yolo_predict.YoloV3Predict(CONFIG["CLASSES"])
LEARNING_RATE = learning_rate
bar = j_bar.ProgressBar(max_epochs, len(train_loader),
                        "Loss:%.3f;Total Loss:%.3f")
for epoch in range(FROM_TRAIN_ITER, max_epochs + 1):
    model.train()
    total_loss = 0
    torch.cuda.empty_cache()
    # if epoch >= 1:
    #     LEARNING_RATE = 0.01
    # if epoch >= 30:
    #     LEARNING_RATE = 0.005
    # if epoch >= 60:
    #     LEARNING_RATE = 0.001
    # if epoch >= 90:
    #     LEARNING_RATE = 0.0005
    # if epoch >= 120:
    #     LEARNING_RATE = 0.00025
    # for param_group in optimizer.param_groups:
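
The `adjust_learning_rate` helper above reads its schedule from module-level names that this excerpt does not show. A minimal sketch of what they typically look like, with illustrative values in the style of the Darknet .cfg defaults (these exact numbers are an assumption):

# Hypothetical module-level schedule; values are illustrative only.
learning_rate = 0.001
batch_size = 64
steps = [-1, 500, 40000, 60000]   # cumulative batch counts where the rate changes
scales = [0.1, 10, 0.1, 0.1]      # multiplier applied at each corresponding step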
Example No. 3
    NetG = NetG.cuda()
    NetD = NetD.cuda()
    x = x.cuda()
    z = z.cuda()
    z_test = z_test.cuda()
    one, one_neg = one.cuda(), one_neg.cuda()

x = t.autograd.Variable(x)
z = t.autograd.Variable(z)
z_test = t.autograd.Variable(z_test)

optimizerD = t.optim.RMSprop(NetD.parameters(), lr=CONFIG["LEARNING_RATE"])
optimizerG = t.optim.RMSprop(NetG.parameters(), lr=CONFIG["LEARNING_RATE"])

gen_iterations = 0
bar = j_bar.ProgressBar(CONFIG["EPOCH"], len(dataset), "D Loss:%.3f;G Loss:%.3f")
for epoch in range(1, CONFIG["EPOCH"] + 1):
    i = 0
    data_iter = iter(dataset)

    while i < len(dataset):

        for p in NetD.parameters():
            p.requires_grad = True

        # Train the critic heavily for the first 25 generator iterations and
        # every 500th one thereafter, as in the original WGAN reference code.
        if gen_iterations < 25 or gen_iterations % 500 == 0:
            N_critic = 100
        else:
            N_critic = 5

        t1 = 0
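
The snippet stops at the top of the critic loop. A minimal sketch of the classic WGAN inner loop it sets up, assuming weight clipping at 0.01 (the clipping bound is an assumption; it is not visible in the excerpt):

# Hypothetical critic loop: N_critic discriminator updates per generator update.
j = 0
while j < N_critic and i < len(dataset):
    j += 1
    for p in NetD.parameters():
        p.data.clamp_(-0.01, 0.01)  # weight clipping keeps the critic Lipschitz
    images = next(data_iter)
    i += 1
    # ... critic forward/backward on the real batch and a generated batch ...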
Example No. 4
dataset = j_data.MNISTDataSetForPytorch(
    radio=0.9, transform=torchvision.transforms.ToTensor())
train_loader = torch.utils.data.DataLoader(dataset=dataset,
                                           batch_size=CONFIG["BATCH_SIZE"],
                                           shuffle=True)

conv1 = cnn_layer.Conv2D([CONFIG["BATCH_SIZE"], 28, 28, 1], 12, 5, 1)
relu1 = cnn_layer.Relu(conv1.output_shape)
pool1 = cnn_layer.MaxPooling(relu1.output_shape)
conv2 = cnn_layer.Conv2D(pool1.output_shape, 24, 3, 1)
relu2 = cnn_layer.Relu(conv2.output_shape)
pool2 = cnn_layer.MaxPooling(relu2.output_shape)
fc = cnn_layer.FullyConnect(pool2.output_shape, 10)
sf = cnn_layer.Softmax(fc.output_shape)

bar = j_bar.ProgressBar(CONFIG["EPOCHS"], len(train_loader), "train:%.3f,%.3f")

for epoch in range(1, CONFIG["EPOCHS"] + 1):
    batch_loss = 0
    batch_acc = 0
    val_acc = 0
    val_loss = 0

    # train
    train_acc = 0
    train_loss = 0
    for i, (train_image, train_label) in enumerate(train_loader):
        # NCHW torch tensors -> NHWC NumPy arrays, the layout the layers expect.
        img = train_image.data.numpy().transpose((0, 2, 3, 1))
        label = train_label.data.numpy().squeeze()

        conv1_out = relu1.forward(conv1.forward(img))
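
The forward pass is cut off after the first conv/ReLU pair. A minimal sketch of how the remaining layers chain, following the shapes wired up above (the loss-helper name `cal_loss` is an assumption about the `cnn_layer` API):

# Hypothetical continuation of the forward pass through the NumPy layers.
pool1_out = pool1.forward(conv1_out)
conv2_out = relu2.forward(conv2.forward(pool1_out))
pool2_out = pool2.forward(conv2_out)
fc_out = fc.forward(pool2_out)
loss = sf.cal_loss(fc_out, label)   # hypothetical helper name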
Example No. 5
    shuffle=True,
    transform=tv.transforms.Compose(
        [tv.transforms.ToTensor(), lambda x: 2 * (x - 0.5)]),
    train=True,
    seen=cur_model.seen,
    batch_size=BATCH_SIZE),
                                       batch_size=BATCH_SIZE,
                                       shuffle=False)

predict = yolo_predict.YoloV2Predict(CONFIG["CLASSES"])

if FROM_TRAIN_ITER > 1:
    model.load_state_dict(
        t.load("outputs/YOLOV2_%03d.pth" % (FROM_TRAIN_ITER - 1)))
LEARNING_RATE = learning_rate / BATCH_SIZE
bar = j_bar.ProgressBar(CONFIG["EPOCHS"], len(train_loader),
                        "Loss:%.3f;Avg Loss:%.3f")
log = logger.Logger(CONFIG["LOG_DIR"])
for epoch in range(FROM_TRAIN_ITER, CONFIG["EPOCHS"] + 1):
    t.cuda.empty_cache()
    lr = adjust_learning_rate(optimizer, processed_batches)
    model.train()
    total_loss = 0
    if epoch >= 30:
        LEARNING_RATE = 0.001 / BATCH_SIZE
    if epoch >= 60:
        LEARNING_RATE = 0.0008 / BATCH_SIZE
    if epoch >= 90:
        LEARNING_RATE = 0.001 / BATCH_SIZE
    if epoch >= 120:
        LEARNING_RATE = 0.00007 / BATCH_SIZE
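
The cascade of epoch checks above is a step schedule; an equivalent, more compact formulation (a sketch, not the repository's code):

# Hypothetical table-driven equivalent of the epoch cascade above.
LR_MILESTONES = {30: 0.001, 60: 0.0008, 90: 0.001, 120: 0.00007}
for milestone, rate in sorted(LR_MILESTONES.items()):
    if epoch >= milestone:
        LEARNING_RATE = rate / BATCH_SIZE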
Example No. 6
###########   LOSS & OPTIMIZER   ##########
optimizerD = t_optim.Adam(Net_D.parameters(), lr=0.0001, betas=(0.5, 0.999))
optimizerG = t_optim.Adam(Net_G.parameters(), lr=0.0001, betas=(0.5, 0.999))

dataset = j_data.Cifar10DataSetForPytorch(train=True, transform=tv.transforms.Compose(
    [
        tv.transforms.ToTensor(),
        # Normalize((0.5,0.5,0.5), (0.5,0.5,0.5))
     ]))
train_loader = t.utils.data.DataLoader(dataset, batch_size=BATCH_SIZE, shuffle=True)

noise = t.randn(100, NOISE_DIM)
noise_var = t_auto.Variable(noise.cuda() if GPU_NUMS > 0 else noise)

k = 0
proBar = j_bar.ProgressBar(EPOCHS, len(train_loader), "D Loss:%.3f;G Loss:%.3f")
for epoch in range(1, EPOCHS + 1):
    for index, (images, _) in enumerate(train_loader):
        mini_batch = images.shape[0]
        noise = t_auto.Variable(
            t.FloatTensor(mini_batch, NOISE_DIM, 1, 1).cuda()
            if GPU_NUMS > 0 else t.FloatTensor(mini_batch, NOISE_DIM, 1, 1))
        real = t_auto.Variable(
            t.FloatTensor(mini_batch, IMAGE_CHANNEL, IMAGE_SIZE, IMAGE_SIZE).cuda()
            if GPU_NUMS > 0 else
            t.FloatTensor(mini_batch, IMAGE_CHANNEL, IMAGE_SIZE, IMAGE_SIZE))
        label = t_auto.Variable(
            t.FloatTensor(1).cuda() if GPU_NUMS > 0 else t.FloatTensor(1))

        Net_D.zero_grad()
        real.data.resize_(images.size()).copy_(images)

        # generate fake data
        noise.data.resize_(images.size(0), NOISE_DIM)
        noise.data.uniform_(-1,1)
        fake = Net_G(noise)
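
The excerpt ends right after the fake batch is produced. A minimal sketch of how a DCGAN-style discriminator step typically continues from here (a sketch under that assumption, not the file's actual code):

# Hypothetical discriminator scoring of the real and fake batches.
output_real = Net_D(real)            # D(x) on the real images
output_fake = Net_D(fake.detach())   # D(G(z)); detach so G receives no gradient here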
Example No. 7
    state_dict = torch.load(FCNConfig["VGG16_MODEL_PATH"])
    vgg.load_state_dict(state_dict)
    model.init_vgg16_params(vgg)
else:
    model.load_state_dict(
        torch.load("outputs/FCN8s_%03d.pth" % (FROM_TRAIN_ITER - 1)))

if FCNConfig["USE_GPU"]:
    model = model.cuda()
LEARNING_RATE = FCNConfig["LEARNING_RATE"]
optim = torch.optim.SGD(model.parameters(),
                        lr=FCNConfig["LEARNING_RATE"],
                        momentum=FCNConfig["MOMENTUM"],
                        weight_decay=FCNConfig["WEIGHT_DECAY"])

bar = j_bar.ProgressBar(FCNConfig["EPOCH"], len(train_loader),
                        "Loss : %.3f; Total Loss : %.3f")

for epoch in (FROM_TRAIN_ITER, FCNConfig["EPOCHS"] + 1):
    model.train()
    total_loss = 0

    for param_group in optim.param_groups:
        param_group['lr'] = LEARNING_RATE

    for batch_idx, (images, targets) in enumerate(train_loader):
        images = torch.autograd.Variable(
            images.cuda() if FCNConfig["USE_GPU"] else images)
        targets = torch.autograd.Variable(
            targets.cuda() if FCNConfig["USE_GPU"] else targets)

        optim.zero_grad()
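
The loop body is cut off after the gradients are zeroed. A minimal sketch of the remaining FCN training step, assuming a pixel-wise cross-entropy criterion named `criterion` (an assumption; it is not shown in the excerpt):

# Hypothetical remainder of the step: forward, loss, backward, update.
outputs = model(images)
loss = criterion(outputs, targets)
loss.backward()
optim.step()
total_loss += loss.item()   # .item() assumes torch >= 0.4; older code used loss.data[0]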
Example No. 8
D_optimizer = t_optim.Adam(Net_D.parameters(), lr=0.0002, betas=(0.5, 0.999))

train_set = j_data.DataSetFromFolderForPix2Pix(
    os.path.join("/input/facades_fixed", "train"))
test_set = j_data.DataSetFromFolderForPix2Pix(
    os.path.join("/input/facades_fixed", "test"))
train_data_loader = t.utils.data.DataLoader(dataset=train_set,
                                            batch_size=CONFIG["BATCH_SIZE"],
                                            shuffle=True)
test_data_loader = t.utils.data.DataLoader(dataset=test_set,
                                           batch_size=CONFIG["BATCH_SIZE"],
                                           shuffle=True)

test_input, test_target = next(iter(test_data_loader))

bar = j_bar.ProgressBar(CONFIG["EPOCH"], len(train_data_loader),
                        "D loss:%.3f;G loss:%.3f")
for epoch in range(1, CONFIG["EPOCH"] + 1):
    for i, (input_img, target_img) in enumerate(train_data_loader):
        x_ = t_auto.Variable(
            input_img.cuda() if CONFIG["GPU_NUM"] > 0 else input_img)
        y_ = t_auto.Variable(
            target_img.cuda() if CONFIG["GPU_NUM"] > 0 else target_img)

        # Train discriminator with real data
        D_real_decision = Net_D(x_, y_).squeeze()
        real_ = t_auto.Variable(
            t.ones(D_real_decision.size()).cuda(
            ) if CONFIG["GPU_NUM"] > 0 else t.ones(D_real_decision.size()))
        D_real_loss = BCE_loss(D_real_decision, real_)

        # Train discriminator with fake data
        gen_image = Net_G(x_)
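
The excerpt stops after the generator produces gen_image. A minimal sketch of the matching fake-data branch, mirroring the real-data branch above (the use of detach() here is an assumption):

# Hypothetical fake-data branch, mirroring the real branch above.
D_fake_decision = Net_D(x_, gen_image.detach()).squeeze()
fake_ = t_auto.Variable(
    t.zeros(D_fake_decision.size()).cuda()
    if CONFIG["GPU_NUM"] > 0 else t.zeros(D_fake_decision.size()))
D_fake_loss = BCE_loss(D_fake_decision, fake_)
D_loss = D_real_loss + D_fake_loss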
Example No. 9
z = t.autograd.Variable(z)
z_test = t.autograd.Variable(z_test)

# Optimizer
optimizerD = t.optim.Adam(NetD.parameters(), lr=CONFIG["LEARNING_RATE"], betas=(0, .9))
optimizerG = t.optim.Adam(NetG.parameters(), lr=CONFIG["LEARNING_RATE"], betas=(0, .9))

def generate_random_sample():
    # Endless stream of random mini-batches drawn without replacement.
    while True:
        random_indexes = numpy.random.choice(
            len(dataset), size=CONFIG["BATCH_SIZE"], replace=False)
        batch = [dataset[i][0] for i in random_indexes]
        yield t.stack(batch, 0)

random_sample = generate_random_sample()

## Fitting model
bar = j_bar.ProgressBar(1, 5000, "D Loss%.3f;G Loss%.3f")
for i in range(1, 5000 + 1):
    for p in NetD.parameters():
        p.requires_grad = True

    for j in range(5):

        ########################
        # (1) Update D network #
        ########################

        NetD.zero_grad()

        # Sample real data
        real_images = next(random_sample)
        real_images = real_images.cuda() if CONFIG["GPU_NUMS"] > 0 else real_images
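
The Adam betas of (0, 0.9) and the five critic updates per generator step suggest a WGAN-GP setup. A minimal sketch of the gradient-penalty term from Gulrajani et al., assuming a penalty weight of 10 and a fake batch named fake_images (both assumptions):

# Hypothetical gradient penalty (WGAN-GP, lambda = 10).
eps = t.rand(real_images.size(0), 1, 1, 1)
eps = eps.cuda() if CONFIG["GPU_NUMS"] > 0 else eps
interp = (eps * real_images + (1 - eps) * fake_images).requires_grad_(True)
grad = t.autograd.grad(NetD(interp).sum(), interp, create_graph=True)[0]
penalty = 10 * ((grad.view(grad.size(0), -1).norm(2, dim=1) - 1) ** 2).mean()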
Example No. 10
    def train(self):
        self.model.train()
        self.optim.zero_grad()

        iteration = 0
        for epoch in range(cfg.SOLVER.MAX_EPOCH):
            if epoch == cfg.TRAIN.REINFORCEMENT.START:
                self.rl_stage = True
            self.setup_loader(epoch)

            start = time.time()
            data_time = AverageMeter()
            batch_time = AverageMeter()
            losses = AverageMeter()
            if not self.distributed or self.args.local_rank == 0:
                pbar = ProgressBar(n_total=len(self.training_loader),
                                   desc='Training')
            val = self.eval(epoch)
            for step, (indices, input_seq, target_seq, gv_feat, att_feats,
                       att_mask, image_ids,
                       dataset_name) in enumerate(self.training_loader):

                data_time.update(time.time() - start)

                input_seq = input_seq.cuda()
                target_seq = target_seq.cuda()
                gv_feat = gv_feat.cuda()
                att_feats = att_feats.cuda()
                att_mask = att_mask.cuda()

                kwargs = self.make_kwargs(indices, input_seq, target_seq,
                                          gv_feat, att_feats, att_mask)
                loss, loss_info = self.forward(kwargs)
                loss.backward()
                # Clip gradients before the optimizer step, per the solver config.
                utils.clip_gradient(self.optim.optimizer, self.model,
                                    cfg.SOLVER.GRAD_CLIP_TYPE,
                                    cfg.SOLVER.GRAD_CLIP)
                self.optim.step()
                self.optim.zero_grad()
                self.optim.scheduler_step('Iter')

                batch_time.update(time.time() - start)
                start = time.time()
                losses.update(loss.item())

                self.summary(iteration, loss, image_ids, dataset_name)
                self.display(iteration, data_time, batch_time, losses,
                             loss_info)
                iteration += 1

                if self.distributed:
                    dist.barrier()
                if not self.distributed or self.args.local_rank == 0:
                    pbar(step)

            self.save_model(epoch)
            val = self.eval(epoch)
            self.optim.scheduler_step('Epoch', val)
            self.scheduled_sampling(epoch)

            if self.distributed:
                dist.barrier()
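
A minimal sketch of how a trainer like this is typically driven (the Trainer constructor and parse_args are illustrative assumptions, not this repository's actual entry point):

# Hypothetical driver for the trainer class above.
if __name__ == '__main__':
    args = parse_args()        # illustrative argument parsing
    trainer = Trainer(args)    # illustrative constructor
    trainer.train()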