コード例 #1
0
def main():
    """Build the A3C shared model and optimizer, trace the graph, and launch training."""
    environment_id = 'CartPole-v0'
    environment = gym.make(environment_id)
    n_actions = environment.action_space.n
    obs_shape = environment.observation_space.low.shape

    # Configure logging from the on-disk config file.
    logging.config.fileConfig('./log/log.conf')
    log = logging.getLogger(__name__)
    log.info('START')

    # Shared policy/value network and its asynchronous RMSprop optimizer
    # with gradient clipping.
    model = A3CFFSoftmaxFFF(obs_shape, n_actions)
    optimizer = RMSpropAsync(lr=LEARNING_RATE, alpha=0.99, eps=RMSPROP_EPS)
    optimizer.setup(model)
    optimizer.add_hook(chainer.optimizer.GradientClipping(40))

    # Dump the computation graph to TensorBoard once, then close the writer.
    run_dir = 'results/' + datetime.datetime.now().strftime('%B%d  %H:%M:%S')
    graph_writer = SummaryWriter(run_dir)
    first_obs = environment.reset()
    first_obs = chainer.Variable(
        np.expand_dims(np.array(first_obs).astype(np.float32), axis=0))
    policy_out, value_out = model.get_pi_and_v(first_obs)
    graph_writer.add_graph([policy_out, value_out])
    graph_writer.close()

    async_train(environment_id, model, optimizer, phi)

    log.info('END')
コード例 #2
0
        return i


def classification_accuracy(out, labels):
    """Return the fraction of rows in `out` whose argmax matches `labels`.

    Args:
        out: (N, C) tensor of per-class scores/logits.
        labels: (N,) tensor of integer class indices.

    Returns:
        0-dim float tensor: mean accuracy in [0, 1].
    """
    # argmax over the class dimension, then average the hit indicator —
    # replaces the manual torch.max / sum / divide sequence.
    predictions = out.argmax(dim=1)
    return (predictions == labels).float().mean()


# Model lives on the GPU for both graph tracing and training.
net = Net().cuda()

# One TensorBoard run directory per launch, named by the current timestamp.
writer = SummaryWriter('runs/' + datetime.now().strftime('%B%d  %H:%M:%S'))
# Trace the network once with a dummy (1, 75, features_size) input so the
# graph appears in TensorBoard.  NOTE(review): the second argument is the
# *output* of net(...), which is what old tensorboardX expected; newer
# versions take the input tensor — confirm against the installed version.
writer.add_graph(
    net,
    net(Variable(torch.rand(1, 75, features_size), requires_grad=True).cuda()))

# Mini-batches of 64 sequences, 75 timesteps in / 1 out, reshuffled each epoch.
loader = DataLoader(MyDataset("data/text_1", input_len=75, output_len=1),
                    batch_size=64,
                    shuffle=True)

# net = net.cuda()
optimizer = Adam(params=net.parameters(), lr=0.001)

# loss — NLLLoss expects the network to emit log-probabilities.
loss = nn.NLLLoss()
batch_number = len(loader)
num_epochs = 500
logging_step = 50
logging_text_step = 1000
コード例 #3
0
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, 10)
        self.bn = nn.BatchNorm2d(20)

    def forward(self, x):
        """Conv -> |.| -> conv/dropout -> batchnorm -> two FC layers -> log-probs.

        Args:
            x: (N, 1, 28, 28) input batch (MNIST-shaped).

        Returns:
            (N, 10) log-softmax class scores.
        """
        x = F.max_pool2d(self.conv1(x), 2)
        # relu(x) + relu(-x) == |x|: a symmetric, absolute-value activation.
        x = F.relu(x) + F.relu(-x)
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = self.bn(x)
        x = x.view(-1, 320)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        # BUG FIX: F.log_softmax without `dim` is deprecated and relied on an
        # implicit choice (dim=1 for 2-D input); make that choice explicit.
        x = F.log_softmax(x, dim=1)
        return x


# Instantiate the network and trace it once with a dummy MNIST-shaped batch.
mnist_net = Mnist()

# if you want to show the input tensor, set requires_grad=True
dummy_out = mnist_net(
    torch.autograd.Variable(torch.Tensor(1, 1, 28, 28), requires_grad=True))

# One TensorBoard run directory per launch, named by the current timestamp.
graph_writer = SummaryWriter('runs/' + datetime.now().strftime('%B%d  %H:%M:%S'))
graph_writer.add_graph(mnist_net, dummy_out)

graph_writer.close()
コード例 #4
0
def train():
    """Train the FCN segmentation model on Cityscapes.

    Relies on module-level configuration (base_data_folder, batch_size,
    image_shape, overlay_during_training, check_validation, resume,
    freeze_layers, opt, l_rate, poly_lr, starting_epoch, epochs, save,
    base_save_folder, num_classes).  Logs metrics to TensorBoard and keeps
    only the best-IoU checkpoint on disk.
    """
    # NOTE(review): this augmentation object is built but never used — the
    # train loader below is created with transforms=None.  Kept for parity;
    # confirm whether it should be passed to CityscapesLoader.
    data_augmentation = DataAugmentationTransform_old(translation_range=(0.0, 0.15),
                                                      rotation_range=10,
                                                      zoom_range=(0.8, 1.0),
                                                      flip_p=0.5,
                                                      brightness_range=(-0.2, 0.2),
                                                      gamma_range=(0.5, 1.5),
                                                      saturation_range=(-0.3, 0.3))
    loader_train = CityscapesLoader(base_data_folder, split='train', is_transform=True, img_size=image_shape, transforms=None)
    trainloader = data.DataLoader(loader_train, batch_size=batch_size, num_workers=4, shuffle=True, pin_memory=True)
    if overlay_during_training:
        loader_test = CityscapesLoader(base_data_folder, split='test', is_transform=True, img_size=image_shape)
        test_loader = data.DataLoader(loader_test, batch_size=batch_size, num_workers=4, shuffle=False, pin_memory=True)
    if check_validation:
        loader_val = CityscapesLoader(base_data_folder, split='val', is_transform=True, img_size=image_shape)
        valloader = data.DataLoader(loader_val, batch_size=batch_size, num_workers=4, shuffle=False, pin_memory=True)
    model = get_model('fcn1s', num_classes)

    writer = SummaryWriter()

    if resume:
        print("Resuming From ", resume_filename)
        checkpoint = torch.load(resume_filename)
        model.load_state_dict(checkpoint['state_dict'])
        #starting_epoch = checkpoint['epoch']
        #optimizer.load_state_dict(checkpoint['optimizer'])

    # Start from a fully trainable model, then optionally freeze the VGG encoder.
    for param in model.parameters():
        param.requires_grad = True

    if freeze_layers:
        print("Freezing VGG layers")
        for conv_block in (model.conv_block1, model.conv_block2, model.conv_block3,
                           model.conv_block4, model.conv_block5):
            for param in conv_block.parameters():
                param.requires_grad = False

    if torch.cuda.is_available():
        print("Using GPU")
        model.cuda(0)
    else:
        print("Using CPU")

    model.train()

    # Only optimize parameters that are still trainable after freezing.
    parameters = filter(lambda p: p.requires_grad, model.parameters())
    if opt == "SGD":
        optimizer = torch.optim.SGD(parameters, lr=l_rate, momentum=0.9, weight_decay=5e-4)
    elif opt == "Adam":
        optimizer = torch.optim.Adam(parameters, lr=l_rate, weight_decay=5e-4)

    def _overlay_sample(index, prefix, epoch):
        """Predict one fixed test image, save its overlay, return the prediction."""
        sample = loader_test[index].unsqueeze(0)
        model.eval()
        pred = model(Variable(sample.cuda(0), requires_grad=True))
        overlay_images(Variable(sample.cuda(0), requires_grad=True), pred, epoch, prefix)
        return pred

    best_metric = 0
    old_file = ""
    for epoch in range(starting_epoch, epochs):
        train_acc = 0
        train_IoU = 0
        train_loss = 0
        train_count = 0

        print("\nEpoch: ", epoch)

        # Every 5 epochs, snapshot predictions on three fixed test images
        # (deduplicated from three near-identical copy-pasted blocks).
        if overlay_during_training and epoch % 5 == 0:
            first_pred = _overlay_sample(67, '67_', epoch)
            writer.add_graph(model, first_pred)
            del first_pred
            _overlay_sample(88, '88_', epoch)
            _overlay_sample(175, '175_', epoch)

        model.train()
        with tqdm.tqdm(trainloader, ncols=100) as t:
            for i, (images, labels) in enumerate(t):
                if torch.cuda.is_available():
                    images = Variable(images.cuda(0))
                    labels = Variable(labels.cuda(0))
                else:
                    images = Variable(images)
                    labels = Variable(labels)

                # Global step (renamed from `iter`, which shadowed the builtin).
                global_step = len(trainloader) * epoch + i
                if poly_lr:
                    poly_lr_scheduler(optimizer, l_rate, global_step, lr_decay_iter=10)

                optimizer.zero_grad()
                outputs = model(images)

                # 255 marks unlabeled pixels in the ground truth.
                loss = cross_entropy2d(outputs, labels, ignore_index=255)

                loss.backward()
                optimizer.step()

                t.set_description('Loss: %8.6f' % loss.data[0])
                t.update(1)

                train_loss = train_loss + loss.data[0]
                acc, IoU = accuracy_IoU(outputs, labels, np.array(range(num_classes)))
                train_acc = train_acc + acc
                train_IoU = train_IoU + IoU.mean()
                train_count = train_count + 1

                # Free GPU memory eagerly before the next batch.
                del outputs
                del loss
                del images
                del labels

        train_acc = train_acc / train_count
        train_IoU = train_IoU / train_count
        train_loss = train_loss / train_count
        print("\nTrain Accuracy: ", train_acc)
        print("Train Loss: ", train_loss)
        print("Train IoU: ", train_IoU, "\n")
        writer.add_scalar('Train Accuracy', train_acc, epoch)
        writer.add_scalar('Train IoU', train_IoU, epoch)
        # BUG FIX: TensorBoard tag typo 'Train Los' -> 'Train Loss'.
        writer.add_scalar('Train Loss', train_loss, epoch)

        if check_validation:
            val_acc = 0
            val_IoU = 0
            val_loss = 0
            val_count = 0
            model.eval()
            for i, (images, labels) in enumerate(valloader):
                if torch.cuda.is_available():
                    images = Variable(images.cuda(0))
                    labels = Variable(labels.cuda(0))
                else:
                    images = Variable(images)
                    labels = Variable(labels)

                outputs = model(images)

                loss = cross_entropy2d(outputs, labels, ignore_index=255)

                val_loss = val_loss + loss.data[0]
                acc, IoU = accuracy_IoU(outputs, labels, np.array(range(num_classes)))
                val_acc = val_acc + acc
                val_IoU = val_IoU + IoU.mean()
                val_count = val_count + 1

                del outputs
                del loss
                del images
                del labels
            val_acc = val_acc / val_count
            val_IoU = val_IoU / val_count
            val_loss = val_loss / val_count
            print("\nVal Accuracy: ", val_acc)
            print("Val Loss: ", val_loss)
            print("Val IoU: ", val_IoU, "\n")
            writer.add_scalar('Val Accuracy', val_acc, epoch)
            writer.add_scalar('Val IoU', val_IoU, epoch)
            writer.add_scalar('Val Loss', val_loss, epoch)

        # BUG FIX: the original read `val_IoU` unconditionally, raising
        # NameError whenever check_validation is False; fall back to train IoU.
        save_metric = val_IoU if check_validation else train_IoU

        if best_metric < save_metric:
            best_metric = save_metric
            print("New Best IoU!")
            if save:
                # Build the checkpoint path once instead of three times.
                checkpoint_path = (base_save_folder + "/checkpoint_" + str(epoch)
                                   + "_" + str(save_metric) + ".pth.tar")
                torch.save({
                    'epoch': epoch,
                    'state_dict': model.state_dict(),
                    'optimizer': optimizer.state_dict(),
                }, checkpoint_path)
                print("Model Saved As " + checkpoint_path)
                # Keep only the latest best checkpoint on disk.
                if os.path.isfile(old_file):
                    os.remove(old_file)
                old_file = checkpoint_path

        print("Best IoU So Far: ", best_metric)

    writer.close()
    print("End Of Training")
コード例 #5
0
# Reshape the flat MNIST training images into (N, 1, 28, 28) conv input.
data_train = train_raw[0].reshape(-1, 1, 28, 28)
labels_train = train_raw[1]

# convert to Variables

# loader: whole training set, float images / long labels, batches of 16
loader = DataLoader(TensorDataset(torch.FloatTensor(data_train[0:]), torch.LongTensor(labels_train[0:])), batch_size=16)

# loss — NLLLoss expects log-probabilities from the network
loss = nn.NLLLoss()
# two graphs?
# net.batch_real = True
# writer.add_graph(net, net(Variable(torch.FloatTensor(data_train[0:1]), requires_grad=True)))
# Trace the graph once in eval mode from a random (1, 100) latent vector —
# presumably `net` is (or wraps) a GAN generator; confirm against its definition.
net.train(False)
net.batch_real = False
writer.add_graph(net, net(Variable(torch.randn(1,100), requires_grad=True).cuda()))

batch_number = len(loader)
num_epochs = 50
num_epochs_pretrain = 7
logging_step = 50
logging_image_step = 25
widgets = [
    'Batch: ', progressbar.Counter(),
    '/', progressbar.FormatCustomText('%(total)s', {"total": batch_number}),
    ' ', progressbar.Bar(marker="-", left='[', right=']'),
    ' ', progressbar.ETA(),
    ' ', progressbar.DynamicMessage('loss_discriminator'),
    ' ', progressbar.DynamicMessage('loss_generator'),
    ' ', progressbar.DynamicMessage("accuracy_discriminator"),
    ' ', progressbar.DynamicMessage('accuracy_generator'),
コード例 #6
0
ファイル: conv.py プロジェクト: lucabergamini/torch_snippets
optimizer = SGD(params=net.parameters(), lr=0.01, momentum=0.85)
# define input from numpy
# BUG FIX: the pickle file handle was opened inline and never closed, and
# pickled data should be read in binary mode; use a context manager + 'rb'.
with open("data/mnist.pkl", "rb") as mnist_file:
    train_raw, _, test_raw = cPickle.load(mnist_file)
# Reshape the flat MNIST training images into (N, 1, 28, 28) conv input.
data_train = train_raw[0].reshape(-1, 1, 28, 28)
labels_train = train_raw[1]

# convert to Variables

# loader: whole training set, float images / long labels, batches of 32
loader = DataLoader(TensorDataset(torch.FloatTensor(data_train[0:]),
                                  torch.LongTensor(labels_train[0:])),
                    batch_size=32)

# loss — NLLLoss expects log-probabilities from the network
loss = nn.NLLLoss()
# Trace the graph once in TensorBoard using the first training image.
writer.add_graph(
    net, net(Variable(torch.FloatTensor(data_train[0:1]), requires_grad=True)))

batch_number = len(loader)
num_epochs = 10
logging_step = 50
logging_image_step = 100
widgets = [
    'Batch: ',
    progressbar.Counter(),
    '/',
    progressbar.FormatCustomText('%(total)s', {"total": batch_number}),
    ' ',
    progressbar.Bar(marker="-", left='[', right=']'),
    ' ',
    progressbar.ETA(),
    ' ',
コード例 #7
0
net = Net()
optimizer = SGD(params=net.parameters(), lr=0.5, momentum=0.85)
# define input from numpy
#data = numpy.random.randint(-2,2,120)

# 120 random 2-bit samples; the target is the bitwise OR of the two columns.
data = numpy.random.randint(0, 2, (120, 2))
labels = data[:, 0:1] | data[:, 1:]
#labels = to_categorical(labels,num_classes=2)

# convert to Variables; squeeze turns the (120, 1) label column into (120,)
data = Variable(torch.FloatTensor(data), requires_grad=True)
labels = Variable(torch.LongTensor(labels)).squeeze()

# NLLLoss expects log-probabilities from the network.
loss = nn.NLLLoss()

# Trace the graph once with a random batch of the same shape as `data`.
writer.add_graph(net, net(Variable(torch.rand(120, 2), requires_grad=True)))

# Full-batch training for 120 steps (xrange: this is Python 2 code).
for i in xrange(120):
    # compute the output
    out = net(data)

    # zero the gradients
    net.zero_grad()
    optimizer.zero_grad()
    # loss
    loss_value = loss(out, labels)
    writer.add_scalar('loss_NLL', loss_value.data[0], i)
    # backpropagate
    loss_value.backward()
    # now the gradient is available and the optimizer can update the weights
    optimizer.step()