Ejemplo n.º 1
0
    net = net.cuda()

# Train YOLOv3 with a warm-up learning rate, saving a checkpoint after
# every epoch.
net.train()

# SGD with the warm-up LR; momentum and weight decay come from the config.
optimizer = SGD(net.parameters(),
                lr=warm_lr,
                momentum=YOLOv3Config["MOMENTUM"],
                weight_decay=YOLOv3Config["WEIGHT_DECAY"])

data_loader = DataLoader(ListDataset(
    "/input",
    target_transform=AnnotationTransform(classes=YOLOv3Config["CLASSES"])),
                         batch_size=YOLOv3Config["BATCH_SIZE"],
                         shuffle=False)

bar = ProgressBar(YOLOv3Config["EPOCHS"], len(data_loader), "Loss:%.3f")

# NOTE(review): range(1, EPOCHS) runs EPOCHS-1 epochs; other loops in this
# file use range(1, EPOCHS + 1) — confirm which is intended.
for epoch in range(1, YOLOv3Config["EPOCHS"]):
    for i, (imgs, targets) in enumerate(data_loader):
        imgs = Variable(imgs.cuda() if YOLOv3Config["GPU_NUMS"] > 0 else imgs)
        # targets = [anno.cuda() for anno in targets] if CFG["GPU_NUMS"] > 0 else [anno for anno in targets]

        optimizer.zero_grad()
        loss = net(imgs, targets)  # the network returns the loss directly

        loss.backward()
        optimizer.step()

        bar.show(epoch, loss.item())

    # Zero-pad the epoch number: the original "%3d" space-pads and produced
    # checkpoint names containing spaces, e.g. "YOLOv3_  1.pth".
    torch.save(net.state_dict(), "YOLOv3_%03d.pth" % epoch)
Ejemplo n.º 2
0
# Fixed noise bank used to render preview samples during training.
Predict_Noise_var = Variable(
    torch.randn(100, CONFIG["NOISE_DIM"]).cuda(
    ) if CONFIG["GPU_NUMS"] > 0 else torch.randn(100, CONFIG["NOISE_DIM"]))

# Build 100 rows by repeating the same 10 noise rows 10 times, paired with
# labels that alternate between 0 and 1 per block of 10 rows.
temp_z_ = torch.randn(10, 100)
fixed_z_ = temp_z_
Predict_y = torch.zeros(10, 1)
for i in range(1, 10):
    fixed_z_ = torch.cat([fixed_z_, temp_z_], 0)
    temp = torch.zeros(10, 1) + (i % 2)  # block label: 0,1,0,1,...
    Predict_y = torch.cat([Predict_y, temp], 0)

Predict_y = one_hot(Predict_y.long())  # one_hot is a project helper — not torch's
Predict_y = Variable(Predict_y.cuda() if CONFIG["GPU_NUMS"] > 0 else Predict_y)

bar = ProgressBar(CONFIG["EPOCHS"], len(train_loader),
                  "D Loss:%.3f, G Loss:%.3f")
for epoch in range(1, CONFIG["EPOCHS"] + 1):
    # Step decay: divide both learning rates by 10 every 20 epochs.
    if epoch % 20 == 0:
        G_optimizer.param_groups[0]['lr'] /= 10
        D_optimizer.param_groups[0]['lr'] /= 10

    for img_real, label_real in train_loader:
        mini_batch = label_real.shape[0]

        # Real/fake BCE targets sized to the current (possibly short) batch.
        label_true = torch.ones(mini_batch)
        label_false = torch.zeros(mini_batch)
        label_true_var = Variable(
            label_true.cuda() if CONFIG["GPU_NUMS"] > 0 else label_true)
        label_false_var = Variable(
            label_false.cuda() if CONFIG["GPU_NUMS"] > 0 else label_false)
# GAN discriminator/generator training setup: BCE loss, fixed label/noise
# tensors reused across batches.
criterion = BCELoss()

true_labels = Variable(t.ones(CONFIG["BATCH_SIZE"]))
fake_labels = Variable(t.zeros(CONFIG["BATCH_SIZE"]))
fix_noises = Variable(t.randn(CONFIG["BATCH_SIZE"], CONFIG["NOISE_DIM"], 1, 1))
noises = Variable(t.randn(CONFIG["BATCH_SIZE"], CONFIG["NOISE_DIM"], 1, 1))

# Move everything to the GPU when one is configured.
if CONFIG["GPU_NUMS"] > 0:
    netD.cuda()
    netG.cuda()
    criterion.cuda()
    true_labels, fake_labels = true_labels.cuda(), fake_labels.cuda()
    fix_noises, noises = fix_noises.cuda(), noises.cuda()

proBar = ProgressBar(CONFIG["EPOCHS"], len(dataLoader),
                     "D Loss:%.3f;G Loss:%.3f")
for epoch in range(1, CONFIG["EPOCHS"] + 1):
    # Step decay: divide both learning rates by 10 every 30 epochs.
    if epoch % 30 == 0:
        optimizer_discriminator.param_groups[0]['lr'] /= 10
        optimizer_generator.param_groups[0]['lr'] /= 10

    for ii, (img, _) in enumerate(dataLoader):
        # Fixed: was `> 1`, which left the images on the CPU with exactly
        # one GPU while netD had been moved to CUDA above (`> 0`) — a
        # guaranteed device-mismatch error.
        real_img = Variable(img.cuda() if CONFIG["GPU_NUMS"] > 0 else img)

        if ii % 1 == 0:  # always true; kept as a knob for the D/G update ratio
            # Train the discriminator.
            netD.zero_grad()
            ## Push real images towards the "true" label.
            output = netD(real_img)
            error_d_real = criterion(output, true_labels)
            error_d_real.backward()
Ejemplo n.º 4
0
        self.hidden = torch.nn.Linear(n_feature, n_hidden)  # hidden layer
        self.out = torch.nn.Linear(n_hidden, n_output)  # output layer

    def forward(self, x):
        """Run the two-layer MLP: ReLU-activated hidden layer, then linear output."""
        hidden_act = F.relu(self.hidden(x))
        return self.out(hidden_act)


# Two-input / two-class classifier trained with RMSprop + cross entropy.
net = Net(n_feature=2, n_hidden=10, n_output=2)  # define the network

optimizer = torch.optim.RMSprop(net.parameters(), lr=0.01)
loss_func = torch.nn.CrossEntropyLoss(
)  # the target label is NOT an one-hotted

bar = ProgressBar(1, STEPS, "Loss:%.9f, Accuracy:%.3f")
predict = []
myloss = []
# Build an N x M grid over the data range to visualize the decision surface.
N, M = 50, 50  # number of sample points along each axis
x1_min, x2_min = x_data.min()
x1_max, x2_max = x_data.max()
t1 = np.linspace(x1_min, x1_max, N)
t2 = np.linspace(x2_min, x2_max, M)
x1, x2 = np.meshgrid(t1, t2)  # mesh grid of sample points
x_show = np.stack((x1.flat, x2.flat), axis=1)  # test points, shape (N*M, 2)

for step in range(STEPS):
    out = net(x)  # input x and predict based on x
    loss = loss_func(
        out, y
    )  # must be (1. nn output, 2. target), the target label is NOT one-hotted
Ejemplo n.º 5
0
    x = x.clamp(0, 1)
    x = x.view(x.size(0), 1, 28, 28)
    return x

# Training hyper-parameters for the MNIST autoencoder below.
EPOCH = 100
BATCH_SIZE = 128
learning_rate = 1e-3

# NOTE(review): img_transform is defined but train_data below uses a plain
# ToTensor() directly — confirm whether img_transform was meant to be passed.
img_transform = transforms.Compose([
    transforms.ToTensor()
    # transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])

train_data = MNISTDataSetForPytorch(train=True, transform=torchvision.transforms.ToTensor())
train_loader = DataLoader(train_data, batch_size=BATCH_SIZE, shuffle=True)
proBar = ProgressBar(EPOCH, len(train_loader), "loss:%.3f")

class VAE(nn.Module):
    """Variational auto-encoder over flattened 28x28 inputs (784 -> 20-d latent)."""

    def __init__(self):
        super(VAE, self).__init__()

        # Encoder: shared hidden layer, then separate heads for mu and logvar.
        self.fc1 = nn.Linear(784, 400)
        self.fc21 = nn.Linear(400, 20)
        self.fc22 = nn.Linear(400, 20)
        # Decoder: latent code back up to pixel space.
        self.fc3 = nn.Linear(20, 400)
        self.fc4 = nn.Linear(400, 784)

    def encode(self, x):
        """Return (mu, logvar) of the latent Gaussian for a batch of inputs."""
        hidden = F.relu(self.fc1(x))
        return self.fc21(hidden), self.fc22(hidden)
Ejemplo n.º 6
0
        self.map3 = Linear(hidden_size, output_size)

    def forward(self, x):
        """Two leaky-ReLU hidden layers followed by a sigmoid output layer."""
        h1 = leaky_relu(self.map1(x), 0.1)
        h2 = leaky_relu(self.map2(h1), 0.1)
        return sigmoid(self.map3(h2))
# 2-D toy GAN: both nets are small MLPs; D is trained 3x per G update.
generator = SimpleMLP(input_size=z_dim, hidden_size=50, output_size=DIMENSION)
discriminator = SimpleMLP(input_size=DIMENSION, hidden_size=100, output_size=1)
if GPU_NUMS > 0:
    generator.cuda()
    discriminator.cuda()
criterion = BCELoss()

d_optimizer = Adadelta(discriminator.parameters(), lr=1)
g_optimizer = Adadelta(generator.parameters(), lr=1)
progBar = ProgressBar(1, iterations, "D Loss:(real/fake) %.3f/%.3f,G Loss:%.3f")
for train_iter in range(1, iterations + 1):
    # Three discriminator updates per outer iteration.
    for d_index in range(3):
        # 1. Train D on real+fake
        discriminator.zero_grad()

        #  1A: Train D on real
        # sample_2d draws `bs` points from the 2-D lookup-table distribution.
        real_samples = sample_2d(lut_2d, bs)
        d_real_data = Variable(torch.Tensor(real_samples))
        if GPU_NUMS > 0:
            d_real_data = d_real_data.cuda()
        d_real_decision = discriminator(d_real_data)
        labels = Variable(torch.ones(bs))
        if GPU_NUMS > 0:
            labels = labels.cuda()
        d_real_loss = criterion(d_real_decision, labels)  # ones = true
# InfoGAN-style setup: fixed noise plus continuous (cc) and discrete (dc)
# latent codes used for periodic preview generation.
d_optimizer = torch.optim.Adam(NetD.parameters(), lr=0.001, betas=(0.5, 0.999))

fixed_noise = torch.Tensor(np.zeros((CONFIG["NOISE_DIM"], CONFIG["Z_DIM"])))
fixed_noise_var = torch.autograd.Variable(fixed_noise.cuda() if CONFIG["GPU_NUMS"] > 0 else fixed_noise)
# Continuous code: each block of 10 rows sweeps the first dimension over [-2, 2].
tmp = np.zeros((CONFIG["NOISE_DIM"], CONFIG["CC_DIM"]))
for k in range(10):
    tmp[k * 10:(k + 1) * 10, 0] = np.linspace(-2, 2, 10)
fixed_cc = torch.Tensor(tmp)
fixed_cc_var =torch.autograd.Variable(fixed_cc.cuda() if CONFIG["GPU_NUMS"] > 0 else fixed_cc)

# Discrete code: block k gets category k one-hot.
tmp = np.zeros((CONFIG["NOISE_DIM"], CONFIG["DC_DIM"]))
for k in range(10):
    tmp[k * 10 : (k + 1) * 10, k] = 1
fixed_dc = torch.Tensor(tmp)
fixed_dc_var = torch.autograd.Variable(fixed_dc.cuda() if CONFIG["GPU_NUMS"] > 0 else fixed_dc)
bar = ProgressBar(CONFIG["EPOCH"], len(data_loader), "D Loss:%.3f;G Loss:%.3f")
for epoch in range(1, CONFIG["EPOCH"] + 1):
    for i, (images, labels) in enumerate(data_loader):
        images = torch.autograd.Variable(images.cuda() if CONFIG["GPU_NUMS"] > 0 else images)

        mini_batch = images.size(0)

        # Per-batch continuous code sampled from N(0, 0.5^2).
        cc = torch.Tensor(np.random.randn(mini_batch, CONFIG["CC_DIM"]) * 0.5 + 0.0)
        cc_var = torch.autograd.Variable(cc.cuda() if CONFIG["GPU_NUMS"] > 0 else cc)

        # Per-batch discrete code: one random category per sample, one-hot.
        codes=[]
        code = np.zeros((mini_batch, CONFIG["DC_DIM"]))
        random_cate = np.random.randint(0, CONFIG["DC_DIM"], mini_batch)
        code[range(mini_batch), random_cate] = 1
        codes.append(code)
        codes = np.concatenate(codes,1)
Ejemplo n.º 8
0
    z = z.cuda()
    z_test = z_test.cuda()

# GAN training with reusable x/y buffer tensors (defined outside this view)
# that are resized/filled in place each batch.
optimizerD = t.optim.Adam(NetD.parameters(),lr=CONFIG["LEARNING_RATE_D"],betas=(CONFIG["BETA1"],0.999), weight_decay=0)
optimizerG = t.optim.Adam(NetG.parameters(),lr=CONFIG["LEARNING_RATE_G"],betas=(CONFIG["BETA1"],0.999), weight_decay=0)

# Resize, convert to tensor, and normalize each channel to [-1, 1].
transform=tv.transforms.Compose([
    tv.transforms.Resize((CONFIG["IMAGE_SIZE"], CONFIG["IMAGE_SIZE"])) ,
    tv.transforms.ToTensor(),
    tv.transforms.Normalize([0.5]*3,[0.5]*3)
])

dataset = dset.ImageFolder(root=CONFIG["DATA_PATH"], transform=transform)
dataloader = t.utils.data.DataLoader(dataset, batch_size=CONFIG["BATCH_SIZE"], shuffle=True)

bar = ProgressBar(CONFIG["EPOCHS"], len(dataloader), "D Loss:%.3f;G Loss:%.3f")
for epoch in range(1, CONFIG["EPOCHS"] + 1):
    for i, data_batch in enumerate(dataloader, 0):
        # Re-enable D's gradients (presumably frozen during the G step later).
        for p in NetD.parameters():
            p.requires_grad = True

        NetD.zero_grad()
        images, labels = data_batch
        current_batch_size = images.size(0)
        images = images.cuda() if CONFIG["GPU_NUMS"] > 0 else images
        # Copy the batch into the pre-allocated input buffer x and set the
        # target buffer y to all ones ("real"). NOTE(review): .data.resize_*
        # is a legacy pre-0.4 pattern — x and y are defined outside this view.
        x.data.resize_as_(images).copy_(images)
        y.data.resize_(current_batch_size).fill_(1)
        y_pred = NetD(x)
        errD_real = criterion(y_pred, y)
        errD_real.backward()
        D_real = y_pred.data.mean()
Ejemplo n.º 9
0
        labelMat.append(int(lineArr[2]))  #添加标签
    fr.close()  #关闭文件
    return dataMat, labelMat


# Load the dataset and shape labels as a column vector for the
# LogisticRegression class below.
X, Y = loadDataSet()
# data = load_iris()
# X = data.data[data.target != 0]
# y = data.target[data.target != 0]
# y[y == 1] = 0
# y[y == 2] = 1
# Y = y.reshape(-1,1)
X = np.asarray(X)
Y = np.asarray(Y)
Y = Y.reshape(-1, 1)  # column vector, shape (n_samples, 1)
bar = ProgressBar(1, 1000, "loss:%.3f")


class LogisticRegression(object):
    def __init__(self):
        self.sigmoid = lambda x: 1. / (1 + np.exp(-x))

    def fit(self, X, y):
        self.w = np.random.randn(X.shape[1], 1)
        for _ in range(10000):
            y_pred = self.sigmoid(X @ self.w)
            #self.w += 0.01 * X.T @ (y - y_pred) #梯度上升
            self.w -= 0.0001 * X.T @ (y_pred - y)  # 梯度下降
            #bar.show(np.mean(0.5*(y_pred- y)**2))
    def predict(self, X):
        """Threshold the sigmoid output at 0.5 to get 0/1 class labels."""
        # NOTE(review): y_pred is computed but never returned, so this method
        # currently returns None — the snippet looks truncated here.
        y_pred = np.round(self.sigmoid(X.dot(self.w)))
# Small regression MLP (2 -> 10 -> 1) trained with RMSprop + MSE; snapshots
# predictions/losses every DECAY_STEP steps for later plotting.
x_train = torch.FloatTensor(x_train)
y_train = torch.FloatTensor(y_train)

Net = Sequential(
    # BatchNorm1d(num_features=2),
    Linear(in_features=2, out_features=10),
    ReLU(inplace=True),
    Linear(in_features=10, out_features=1),
)

optimizer = RMSprop(Net.parameters(), lr=0.001)
loss_func = MSELoss()

x_data, y_data = Variable(x_train), Variable(y_train)
bar = ProgressBar(1, STEPS, "train_loss:%.9f")

predict = []  # prediction snapshots, one every DECAY_STEP steps
myloss = []   # matching loss snapshots

for step in range(STEPS):
    prediction = Net(x_data)
    loss = loss_func(prediction, y_data)

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    bar.show(1, loss.item())
    if (step + 1) % DECAY_STEP == 0:
        predict.append(prediction.data.numpy())
        myloss.append(loss.item())
Ejemplo n.º 11
0
# CIFAR-10 training: model and transform come from the `json` config dict.
train_data = Cifar10DataSetForPytorch(train=True, transform=json["transform"])
train_loader = DataLoader(dataset=train_data,
                          batch_size=BATCH_SIZE,
                          shuffle=True)

# Build the network.
model = json["model"](json["pretrained"])

# Fixed: the model was only moved to CUDA when GPU_NUMS > 1, while the data
# and loss below use GPU_NUMS > 0 — with exactly one GPU the model stayed on
# the CPU and every forward pass crashed with a device mismatch.
model = torch.nn.DataParallel(model)
if GPU_NUMS > 0:
    model = model.cuda()
optimizer = Adam(model.parameters(), lr=LR)
loss_func = CrossEntropyLoss().cuda() if GPU_NUMS > 0 else CrossEntropyLoss()

# Training loop.
proBar = ProgressBar(EPOCH, len(train_loader), "loss:%.3f,acc:%.3f")
for epoch in range(EPOCH):
    for step, (x, y) in enumerate(train_loader):
        data = Variable(x.cuda() if GPU_NUMS > 0 else x)
        # Labels arrive with a trailing dim of 1; squeeze and cast for CE loss.
        label = Variable(
            torch.squeeze(y, dim=1).type(torch.LongTensor).cuda() if
            GPU_NUMS > 0 else torch.squeeze(y, dim=1).type(torch.LongTensor))
        optimizer.zero_grad()
        output = model(data)

        loss = loss_func(output, label)
        loss.backward()
        optimizer.step()

        prediction = torch.max(softmax(output), 1)[1]
        pred_label = prediction.data.cpu().numpy().squeeze()
Ejemplo n.º 12
0
                                              "VOC2012", "JPEGImages"),
                            list_file='utils/voc2012train.txt',
                            train=True,
                            transform=[ToTensor()])
train_loader = DataLoader(train_dataset,
                          batch_size=YOLOv1Config["BATCH_SIZE"],
                          shuffle=True)

criterion = YOLOv1Loss()
# Optimizer.
optimizer = SGD(Net.parameters(),
                lr=YOLOv1Config["LEARNING_RATE"],
                momentum=0.95,
                weight_decay=5e-4)
bar = ProgressBar(YOLOv1Config["EPOCHS"],
                  len(train_loader),
                  "Loss:%.3f",
                  current_epoch=FROM_TRAIN_ITER)

# Resume from the previous epoch's checkpoint when restarting mid-run.
if FROM_TRAIN_ITER > 1:
    Net.load_state_dict(
        torch.load("output/YoloV1_%d.pth" % (FROM_TRAIN_ITER - 1)))

for epoch in range(FROM_TRAIN_ITER, YOLOv1Config["EPOCHS"]):
    # Warm-up / step LR schedule. NOTE(review): LEARNING_RATE is assigned but
    # never written into optimizer.param_groups within this view — confirm the
    # schedule is actually applied further down.
    if epoch == 1:
        LEARNING_RATE = 0.0005
    if epoch == 2:
        LEARNING_RATE = 0.00075
    if epoch >= 3 and epoch < 80:
        LEARNING_RATE = 0.001
    if epoch >= 80 and epoch < 100:
        LEARNING_RATE = 0.0001
Ejemplo n.º 13
0
        self.out = nn.Linear(in_features=32 * 7 * 7, out_features=10, bias=False)

    def forward(self, x):
        """Apply both conv stages, flatten per sample, then classify."""
        features = self.conv2(self.conv1(x))
        flat = features.view(features.size(0), -1)
        return self.out(flat)

# MNIST CNN training with Adam + cross entropy; per-batch accuracy computed
# against the argmax of the softmaxed logits.
train_data = MNISTDataSetForPytorch(train=True, transform=torchvision.transforms.ToTensor())
train_loader = data.DataLoader(dataset=train_data, batch_size=128,
                               shuffle=True)

cnn = CNN().cuda() if GPU_NUMS > 0 else CNN()
optimizer = Adam(cnn.parameters(), lr=LR)
loss_func = nn.CrossEntropyLoss().cuda() if GPU_NUMS > 0 else nn.CrossEntropyLoss()
proBar = ProgressBar(EPOCH, len(train_loader), "Loss: %.3f;Accuracy: %.3f")
for epoch in range(EPOCH):
    for step, (x,y) in enumerate(train_loader):
        b_x = Variable(x.cuda() if GPU_NUMS > 0 else x)
        # Labels cast to long and squeezed in place for CrossEntropyLoss.
        b_y = Variable(y.type(torch.LongTensor).cuda() if GPU_NUMS > 0 else y.type(torch.LongTensor)).squeeze_()
        output = cnn(b_x)
        loss = loss_func(output, b_y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # Predicted class = argmax over the softmax probabilities.
        prediction = torch.max(F.softmax(output, dim=1), 1)[1]
        pred_y = prediction.cpu().data.numpy().squeeze()
        target_y = b_y.cpu().data.numpy()
        accuracy = sum(pred_y == target_y) / len(target_y)
Ejemplo n.º 14
0
            nn.ReLU(True),
            nn.ConvTranspose2d(8, 1, 2, stride=2, padding=1),  # b, 1, 28, 28
            nn.Tanh()
        )

    def forward(self, x):
        """Encode the input to the latent code, then decode it back."""
        latent = self.encoder(x)
        return self.decoder(latent)


# Convolutional autoencoder training: MSE reconstruction loss, Adam with
# weight decay, preview image saved every 10 epochs.
model = autoencoder().cuda() if torch.cuda.is_available() else autoencoder()
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate,
                             weight_decay=1e-5)
proBar = ProgressBar(num_epochs, len(dataloader), "loss:%.3f")
for epoch in range(num_epochs):
    for data in dataloader:
        img, _ = data
        img = Variable(img).cuda() if torch.cuda.is_available() else Variable(img)
        # ===================forward=====================
        output = model(img)
        loss = criterion(output, img)
        # ===================backward====================
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # Fixed: loss.data[0] raises on 0-dim tensors in PyTorch >= 0.5;
        # .item() is the supported scalar accessor (used elsewhere in file).
        proBar.show(loss.item())

    if epoch % 10 == 0:
        pic = to_img(output.cpu().data)
Ejemplo n.º 15
0
                                  ReLU(True), Linear(12, 3))
        self.decoder = Sequential(Linear(3, 12), ReLU(True), Linear(12, 64),
                                  ReLU(True), Linear(64, 128), ReLU(True),
                                  Linear(128, 28 * 28), Tanh())

    def forward(self, x):
        """Compress via the encoder and reconstruct via the decoder."""
        code = self.encoder(x)
        reconstruction = self.decoder(code)
        return reconstruction


# Fully-connected autoencoder training on flattened 28x28 images.
model = autoencoder().cuda() if GPU_NUMS > 0 else autoencoder()
criterion = MSELoss()
optimizer = Adam(model.parameters(), lr=learning_rate, weight_decay=1e-5)

proBar = ProgressBar(EPOCH, len(train_loader), "Loss:%.3f")

# Fixed: range(1, EPOCH) only ran EPOCH-1 epochs although the progress bar
# is configured for EPOCH; matches the range(1, N + 1) pattern used by the
# other training loops in this file.
for epoch in range(1, EPOCH + 1):
    for data in train_loader:
        img, _ = data
        img = img.view(img.size(0), -1)  # flatten to (batch, 784)
        img = Variable(img).cuda() if GPU_NUMS > 0 else Variable(img)
        # ===================forward=====================
        output = model(img)
        loss = criterion(output, img)
        # ===================backward====================
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        proBar.show(epoch, loss.item())
    # ===================log========================
Ejemplo n.º 16
0
# Semantic-segmentation training: per-pixel 2D cross entropy on VOC.
loss_fn = cross_entropy2d

'''
Data
'''

# Augmented VOC segmentation set: random rotation + horizontal flip.
data_loader = VOCSegDataSet(is_transform=True, img_size=(FCNConfig["IMAGE_SIZE"], FCNConfig["IMAGE_SIZE"]),
                            augmentations=Compose([RandomRotate(10),
                                                   RandomHorizontallyFlip()]), img_norm=True)

train_loader = DataLoader(data_loader, batch_size=FCNConfig["BATCH_SIZE"], shuffle=True)

'''
Train
'''
bar = ProgressBar(FCNConfig["EPOCHS"], len(train_loader), "Loss:%.3f")
# NOTE(review): range(1, EPOCHS) runs EPOCHS-1 epochs while the bar expects
# EPOCHS — confirm which is intended.
for epoch in range(1, FCNConfig["EPOCHS"]):
    model.train()
    for i, (images, labels) in enumerate(train_loader):
        images = Variable(images.cuda() if FCNConfig["GPU_NUMS"] > 0 else images)
        labels = Variable(labels.cuda() if FCNConfig["GPU_NUMS"] > 0 else labels)

        optimizer.zero_grad()
        outputs = model(images)

        loss = loss_fn(input=outputs, target=labels)

        loss.backward()
        optimizer.step()

        bar.show(epoch, loss.item())
Ejemplo n.º 17
0
    return BCE + KLD


# VAE training on 64x64 face crops; loss_function combines reconstruction
# BCE and the KL term (defined above this view).
model = VAE()
if GPU_NUMS > 0:
    model.cuda()

reconstruction_function = BCELoss()
reconstruction_function.size_average = False  # sum, not mean, over elements
optimizer = Adam(model.parameters(), lr=1e-4)

dataset = ImageFolder(root='/input/face/64_crop',
                      transform=Compose([ToTensor()]))
train_loader = DataLoader(dataset, batch_size=BATCH_SIZE, shuffle=True)

bar = ProgressBar(EPOCH, len(train_loader), "Loss:%.3f")

model.train()
train_loss = 0
for epoch in range(EPOCH):
    for ii, (image, label) in enumerate(train_loader):
        mini_batch = image.shape[0]
        data = Variable(image.cuda() if GPU_NUMS > 0 else image)
        optimizer.zero_grad()
        recon_batch, mu, logvar = model(data)
        loss = loss_function(recon_batch, data, mu, logvar)
        loss.backward()
        # Fixed (two sites): loss.data[0] raises on 0-dim tensors in
        # PyTorch >= 0.5; .item() is the supported scalar accessor.
        train_loss += loss.item()
        optimizer.step()

        bar.show(loss.item() / mini_batch)
# TensorFlow 1.x regression: a 2 -> 10 -> 1 network trained with Adam on MSE.
# Hidden layer: 2 inputs -> 10 ReLU units.
Weights_L1 = tf.Variable(tf.random_normal([2, 10]))
biases_L1 = tf.Variable(tf.zeros([1, 10]))
Wx_plus_b_L1 = tf.matmul(x, Weights_L1) + biases_L1
L1 = tf.nn.relu(Wx_plus_b_L1)

# Output layer: 10 -> 1, linear.
Weights_L2 = tf.Variable(tf.random_normal([10, 1]))
biases_L2 = tf.Variable(tf.zeros([1, 1]))
prediction = tf.matmul(L1, Weights_L2) + biases_L2
# prediction = tf.nn.tanh(Wx_plus_b_L2)

loss = tf.reduce_mean(tf.square(y - prediction))
train_step = tf.train.AdamOptimizer(0.001).minimize(loss)

predict = []  # prediction snapshots, one every DECAY_STEP steps
myloss = []   # matching loss snapshots
bar = ProgressBar(1, STEPS, "train_loss:%.9f")
sess = tf.Session()
sess.run(tf.global_variables_initializer())
for step in range(STEPS):
    _, train_loss, prediction_value = sess.run([train_step, loss, prediction],
                                               feed_dict={
                                                   x: x_train,
                                                   y: y_train
                                               })

    bar.show(1, train_loss)
    if (step + 1) % DECAY_STEP == 0:
        predict.append(prediction_value)
        myloss.append(train_loss)

fig, ax = plt.subplots()
Ejemplo n.º 19
0
# SSD training: MultiBox loss = localization + confidence terms.
optimizer = SGD(Net.parameters(), lr=1e-3, momentum=0.9, weight_decay=5e-4)

criterion = MultiBoxLoss().cuda() if GPU_NUMS > 0 else MultiBoxLoss()
Net.train()
loc_loss, conf_loss = 0, 0
dataset = SSDDataSetForPytorch(
    transform=Augmentation(),
    target_transform=AnnotationTransform(classes=VOC_CLASSES))
# detection_collate handles the variable number of boxes per image.
data_loader = DataLoader(dataset,
                         BATCH_SIZE,
                         shuffle=True,
                         collate_fn=detection_collate,
                         pin_memory=True)
epoch_size = len(dataset) // BATCH_SIZE

bar = ProgressBar(EPOCH, len(data_loader), "Loss:%.3f")

# NOTE(review): range(1, EPOCH) runs EPOCH-1 epochs — confirm intent.
for epoch in range(1, EPOCH):
    for i, (imgs, targets) in enumerate(data_loader):
        imgs = Variable(imgs.cuda() if GPU_NUMS > 0 else imgs)
        # targets is a list of per-image annotation tensors.
        targets = [anno.cuda() for anno in targets
                   ] if GPU_NUMS > 0 else [anno for anno in targets]

        optimizer.zero_grad()

        out = Net(imgs)

        # Combined localization + confidence loss.
        loss_l, loss_c = criterion(out, targets)
        loss = loss_c + loss_l
        loss.backward()
        optimizer.step()