Example #1
import torch
import torchvision.transforms as transforms
from PIL import Image

from model import Lenet  # per Example #7, the Lenet class lives in model.py


def main():
    transform = transforms.Compose([
        transforms.Resize((32, 32)),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])
    classes = ("plane", "car", "bird", "cat", "deer", "dog", "frog", "horse",
               "ship", "truck")
    net = Lenet()
    net.load_state_dict(torch.load("Lenet.pth"))
    net.eval()  # switch to inference mode
    im = Image.open("1.jpg")
    im = transform(im)
    im = torch.unsqueeze(im, dim=0)
    with torch.no_grad():
        outputs = net(im)
        predict = torch.max(outputs, dim=1)[1].data.numpy()
        print(classes[int(predict)])
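The Lenet class itself is not shown in any of these examples; per Example #7 it lives in model.py. A minimal sketch of a CIFAR-10 LeNet-5 style definition that would satisfy this script (the layer sizes are an assumption; the real model may differ):

import torch.nn as nn
import torch.nn.functional as F


class Lenet(nn.Module):
    # Hypothetical LeNet-5 layout for 3x32x32 CIFAR-10 images
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 16, 5)
        self.pool1 = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(16, 32, 5)
        self.pool2 = nn.MaxPool2d(2, 2)
        self.fc1 = nn.Linear(32 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        x = self.pool1(F.relu(self.conv1(x)))  # 3x32x32 -> 16x14x14
        x = self.pool2(F.relu(self.conv2(x)))  # 16x14x14 -> 32x5x5
        x = x.view(-1, 32 * 5 * 5)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        return self.fc3(x)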
Example #2
import oneflow as flow  # legacy single-client OneFlow API


def lenet():
    # Build a constant dummy input on CPU and run it through the Lenet network
    with flow.scope.placement("cpu", "0:0"):
        x = flow.get_variable(
            name="x1",
            shape=(100, 1, 28, 28),
            dtype=flow.float,
            initializer=flow.constant_initializer(1),
        )
        return Lenet(x)
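In the legacy single-client OneFlow API this function would normally run inside a job wrapped with the flow.global_function decorator; a minimal usage sketch, assuming OneFlow 0.x (the decorator arguments are illustrative):

import oneflow as flow
import oneflow.typing as tp


@flow.global_function(type="predict")
def lenet_job() -> tp.Numpy:
    # Returns the Lenet output blob as a numpy array when the job is called
    return lenet()

# out = lenet_job()  # compiles the graph on the first call and executes it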
Example #3
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms

from model import Lenet  # per Example #7, the Lenet class lives in model.py


def main():
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])
    train_sets = torchvision.datasets.CIFAR10(root="./data",
                                              train=True,
                                              download=False,
                                              transform=transform)
    train_loader = torch.utils.data.DataLoader(train_sets,
                                               batch_size=36,
                                               shuffle=True,
                                               num_workers=0)

    val_sets = torchvision.datasets.CIFAR10(root="./data",
                                            train=False,
                                            download=False,
                                            transform=transform)
    val_loader = torch.utils.data.DataLoader(val_sets,
                                             batch_size=1000,
                                             shuffle=False,
                                             num_workers=0)
    val_data_iter = iter(val_loader)
    val_images, val_labels = next(val_data_iter)  # Python 3: next(), not .next()

    net = Lenet()
    net.to(device)
    loss_function = nn.CrossEntropyLoss()
    optimizer = optim.Adam(net.parameters(), lr=0.001)

    for epoch in range(10):
        running_loss = 0.0
        # each element yielded by train_loader is [images, labels]
        # images: [batch, channel, height, width]
        # labels: [batch]
        for step, data in enumerate(train_loader):
            inputs, labels = data
            optimizer.zero_grad()

            outputs = net(inputs.to(device))
            loss = loss_function(outputs, labels.to(device))
            loss.backward()
            optimizer.step()

            running_loss += loss.item()
            if step % 500 == 499:
                with torch.no_grad():
                    outputs = net(val_images.to(device))
                    predict_y = torch.max(outputs, dim=1)[1]
                    accuracy = torch.eq(
                        predict_y, val_labels.to(device)
                    ).sum().item() / val_labels.size(0)
                    print('[%d, %5d] train_loss: %.3f  test_accuracy: %.3f' %
                          (epoch + 1, step + 1, running_loss / 500, accuracy))
                    running_loss = 0.0

    print("Finishing Training")
    sava_path = "./Lenet.pth"
    torch.save(net.state_dict(), sava_path)
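Note that the accuracy above is measured on a single 1000-image validation batch. A short sketch of evaluating over the whole validation split instead, reusing the names from this example:

def evaluate(net, val_loader, device):
    net.eval()
    correct, total = 0, 0
    with torch.no_grad():
        for images, labels in val_loader:
            outputs = net(images.to(device))
            predict_y = torch.max(outputs, dim=1)[1]
            correct += torch.eq(predict_y, labels.to(device)).sum().item()
            total += labels.size(0)
    return correct / total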
Example #4
def repvgg_inference(image: tp.Numpy.Placeholder(shape=(1, 1, 28, 28))) -> tp.Numpy:
    input_lbns["image"] = image.logical_blob_name
    output = Lenet(image)
    output_lbns["output"] = output.logical_blob_name
    return output
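The input_lbns and output_lbns dictionaries are filled at module level so the logical blob names can later be used, e.g. in a saved-model signature. A sketch of the assumed surrounding context (the decorator line is an assumption based on legacy OneFlow serving examples):

import oneflow as flow
import oneflow.typing as tp

# Collected logical blob names for the serving signature
input_lbns = {}
output_lbns = {}

# The function above would typically be registered as an inference job:
# @flow.global_function(type="predict")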
Example #5
import numpy as np
import torch
import torch.nn.functional as F
import matplotlib.pyplot as plt
from torch.utils.data import DataLoader
from torchvision import datasets, transforms

# `Lenet`, `opt` (parsed command-line options) and `device` are assumed to be
# defined at module level.


def main():

    sum_mw1_after_pruning = []

    train_loader = DataLoader(datasets.MNIST('../data',
                                             train=True,
                                             download=True,
                                             transform=transforms.Compose([
                                                 transforms.ToTensor(),
                                                 transforms.Normalize(
                                                     (0.1307, ), (0.3081, ))
                                             ])),
                              batch_size=opt.batch_size,
                              shuffle=True)

    test_loader = DataLoader(datasets.MNIST('../data',
                                            train=False,
                                            transform=transforms.Compose([
                                                transforms.ToTensor(),
                                                transforms.Normalize(
                                                    (0.1307, ), (0.3081, ))
                                            ])),
                             batch_size=opt.batch_size,
                             shuffle=True)

    overall_acc_0_init = []
    overall_acc_4_init = []
    overall_acc_20_init = []
    overall_acc_60_init = []

    overall_acc_0_rand = []
    overall_acc_4_rand = []
    overall_acc_20_rand = []
    overall_acc_60_rand = []

    pruning = [4, 20, 60]

    lbls = ['0', '4', '20', '60']

    for test in range(10):

        print("test {}".format(test))

        model = Lenet(28 * 28, opt.h1_dim, opt.h2_dim).to(device)

        optimizer = torch.optim.Adam(
            [w for name, w in model.named_parameters() if 'mask' not in name],
            lr=opt.lr)

        all_acc = []

        acc_0 = []

        iteration = 0
        for epoch in range(1, opt.epochs + 1):
            model.train()
            for batch_idx, (data, target) in enumerate(train_loader):
                iteration += 1
                data, target = data.to(device), target.to(device)
                optimizer.zero_grad()
                output = model(data)
                loss = F.nll_loss(output, target)
                loss.backward()
                optimizer.step()
                if iteration % opt.record_every == 0:

                    model.eval()
                    correct = 0
                    with torch.no_grad():
                        for data, target in test_loader:
                            data, target = data.to(device), target.to(device)
                            output = model(data)
                            # index of the max log-probability = predicted class
                            pred = output.argmax(dim=1, keepdim=True)
                            correct += pred.eq(target.view_as(pred)).sum().item()

                    acc_0.append(correct / len(test_loader.dataset))

        all_acc.append(acc_0)

        model.save_trained_weight()

        print("smart")

        for rate in pruning:

            print("rate: {}".format(rate))

            tmpr_acc = []
            output_rate = int(rate / 2)

            optimizer = torch.optim.Adam(
                [w for name, w in model.named_parameters() if 'mask' not in name],
                lr=opt.lr)

            # Restore the trained weights, rebuild the mask, prune, then reset
            # the surviving weights to their values at initialization
            model.load_trained_weight()
            model.reset_mask()
            model.prune(rate, output_rate)
            model.reinitializ()

            first = True

            for epoch in range(1, opt.epochs + 1):
                model.train()
                for batch_idx, (data, target) in enumerate(train_loader):
                    iteration += 1
                    data, target = data.to(device), target.to(device)
                    optimizer.zero_grad()
                    output = model(data, verbal=first)
                    first = False
                    loss = F.nll_loss(output, target)
                    loss.backward()
                    optimizer.step()
                    if iteration % opt.record_every == 0:
                        model.eval()
                        correct = 0
                        with torch.no_grad():
                            for data, target in test_loader:
                                data, target = data.to(device), target.to(device)
                                output = model(data)
                                pred = output.argmax(dim=1, keepdim=True)
                                correct += pred.eq(target.view_as(pred)).sum().item()

                        tmpr_acc.append(correct / len(test_loader.dataset))
            all_acc.append(tmpr_acc)

        if opt.plot:

            plt.clf()
            for acc, lbl in zip(all_acc, lbls):
                plt.plot(np.arange(len(acc)), acc, label=lbl)
            plt.legend(title="Pruning (%):")
            plt.xlabel("Iteration")
            plt.ylabel("Test Accuracy")
            plt.savefig("lotteryticket_smart_init_{}".format(test))
            plt.close()

        overall_acc_0_init.append(all_acc[0])
        overall_acc_4_init.append(all_acc[1])
        overall_acc_20_init.append(all_acc[2])
        overall_acc_60_init.append(all_acc[3])

        all_acc = []
        all_acc.append(acc_0)
        print("rand")

        for rate in pruning:

            print("rate: {}".format(rate))

            tmpr_acc = []
            output_rate = int(rate / 2)

            optimizer = torch.optim.Adam(
                [w for name, w in model.named_parameters() if 'mask' not in name],
                lr=opt.lr)

            # Same procedure, but the surviving weights are re-initialized
            # randomly instead of being reset to their original values
            model.load_trained_weight()
            model.reset_mask()
            model.prune(rate, output_rate)
            model.random_reinit()

            first = True

            for epoch in range(1, opt.epochs + 1):
                model.train()
                for batch_idx, (data, target) in enumerate(train_loader):
                    iteration += 1
                    data, target = data.to(device), target.to(device)
                    optimizer.zero_grad()
                    output = model(data, verbal=first)
                    first = False
                    loss = F.nll_loss(output, target)
                    loss.backward()
                    optimizer.step()
                    if iteration % opt.record_every == 0:
                        model.eval()
                        correct = 0
                        with torch.no_grad():
                            for data, target in test_loader:
                                data, target = data.to(device), target.to(device)
                                output = model(data)
                                pred = output.argmax(dim=1, keepdim=True)
                                correct += pred.eq(target.view_as(pred)).sum().item()

                        tmpr_acc.append(correct / len(test_loader.dataset))
            all_acc.append(tmpr_acc)

        if opt.plot:

            plt.clf()
            for acc, lbl in zip(all_acc, lbls):
                plt.plot(np.arange(len(acc)), acc, label=lbl)
            plt.legend(title="Pruning (%):")
            plt.xlabel("Iteration")
            plt.ylabel("Test Accuracy")
            plt.savefig("lotteryticket_rand_init_{}".format(test))
            plt.close()

        overall_acc_0_rand.append(all_acc[0])
        overall_acc_4_rand.append(all_acc[1])
        overall_acc_20_rand.append(all_acc[2])
        overall_acc_60_rand.append(all_acc[3])

    acc_0_init_np = np.array(overall_acc_0_init)
    acc_4_init_np = np.array(overall_acc_4_init)
    acc_20_init_np = np.array(overall_acc_20_init)
    acc_60_init_np = np.array(overall_acc_60_init)

    acc_0_rand_np = np.array(overall_acc_0_rand)
    acc_4_rand_np = np.array(overall_acc_4_rand)
    acc_20_rand_np = np.array(overall_acc_20_rand)
    acc_60_rand_np = np.array(overall_acc_60_rand)

    acc_0_init_mean = np.mean(acc_0_init_np, axis=0)
    acc_4_init_mean = np.mean(acc_4_init_np, axis=0)
    acc_20_init_mean = np.mean(acc_20_init_np, axis=0)
    acc_60_init_mean = np.mean(acc_60_init_np, axis=0)

    acc_0_rand_mean = np.mean(acc_0_rand_np, axis=0)
    acc_4_rand_mean = np.mean(acc_4_rand_np, axis=0)
    acc_20_rand_mean = np.mean(acc_20_rand_np, axis=0)
    acc_60_rand_mean = np.mean(acc_60_rand_np, axis=0)

    all_acc_mean = [
        acc_0_init_mean, acc_4_init_mean, acc_20_init_mean, acc_60_init_mean
    ]

    if opt.plot:

        plt.clf()
        for acc, lbl in zip(all_acc_mean, lbls):
            plt.plot(np.arange(len(acc)), acc, label=lbl)
        plt.legend(title="Pruning (%):")
        plt.xlabel("Iteration")
        plt.ylabel("Test Accuracy")
        plt.savefig("lotteryticket_smart_init_mean")
        plt.close()

        all_acc_mean = [
            acc_0_rand_mean, acc_4_rand_mean, acc_20_rand_mean,
            acc_60_rand_mean
        ]

        plt.clf()
        for acc, lbl in zip(all_acc_mean, lbls):
            plt.plot(np.arange(len(acc)), acc, label=lbl)
        plt.legend(title="Pruning (%):")
        plt.xlabel("Iteration")
        plt.ylabel("Test Accuracy")
        plt.savefig("lotteryticket_rand_init_mean")
        plt.close()
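None of the helper methods called on the model (save_trained_weight, load_trained_weight, reset_mask, prune, reinitializ, random_reinit) are shown here. A rough sketch of a mask-based MLP that would fit this lottery-ticket experiment; every method body below is an assumption, and the real model.py may differ:

import copy
import torch
import torch.nn as nn
import torch.nn.functional as F


class Lenet(nn.Module):
    # Hypothetical two-hidden-layer MLP with per-layer pruning masks
    def __init__(self, in_dim, h1_dim, h2_dim, out_dim=10):
        super().__init__()
        self.fc1 = nn.Linear(in_dim, h1_dim)
        self.fc2 = nn.Linear(h1_dim, h2_dim)
        self.fc3 = nn.Linear(h2_dim, out_dim)
        # Masks are parameters named "mask*" so the optimizer filter
        # `'mask' not in name` in main() skips them
        self.mask1 = nn.Parameter(torch.ones_like(self.fc1.weight), requires_grad=False)
        self.mask2 = nn.Parameter(torch.ones_like(self.fc2.weight), requires_grad=False)
        self.mask3 = nn.Parameter(torch.ones_like(self.fc3.weight), requires_grad=False)
        self._init_state = copy.deepcopy(self.state_dict())

    def forward(self, x, verbal=False):
        if verbal:  # main() passes verbal=True on the first batch after pruning
            active = self.mask1.sum() + self.mask2.sum() + self.mask3.sum()
            print("active weights: {}".format(int(active)))
        x = x.view(x.size(0), -1)
        x = F.relu(F.linear(x, self.fc1.weight * self.mask1, self.fc1.bias))
        x = F.relu(F.linear(x, self.fc2.weight * self.mask2, self.fc2.bias))
        x = F.linear(x, self.fc3.weight * self.mask3, self.fc3.bias)
        return F.log_softmax(x, dim=1)  # main() trains with F.nll_loss

    def save_trained_weight(self):
        self._trained_state = copy.deepcopy(self.state_dict())

    def load_trained_weight(self):
        self.load_state_dict(self._trained_state)

    def reset_mask(self):
        for m in (self.mask1, self.mask2, self.mask3):
            m.data.fill_(1.0)

    def prune(self, rate, output_rate):
        # Zero the smallest-magnitude weights; the output layer is pruned
        # at half the rate, matching output_rate = rate / 2 in main()
        for fc, mask, r in ((self.fc1, self.mask1, rate),
                            (self.fc2, self.mask2, rate),
                            (self.fc3, self.mask3, output_rate)):
            w = fc.weight.data.abs().flatten()
            k = int(w.numel() * r / 100)
            if k > 0:
                threshold = w.kthvalue(k).values
                mask.data[fc.weight.data.abs() <= threshold] = 0.0

    def reinitializ(self):
        # Lottery ticket: reset weights to their initial values, keep masks
        masks = {k: v.clone() for k, v in self.state_dict().items() if "mask" in k}
        self.load_state_dict(self._init_state)
        self.load_state_dict(masks, strict=False)

    def random_reinit(self):
        # Control experiment: re-initialize surviving weights randomly instead
        for fc in (self.fc1, self.fc2, self.fc3):
            nn.init.kaiming_uniform_(fc.weight.data, a=5 ** 0.5)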
Example #6
import torch
import torch.nn as nn

from model import Lenet  # the Lenet class is written in model.py

D_out = 6
gpu = 1

# We'll only take a small subset of the test data for quick evaluation
test_data = test_data[:1000]
test_labels = test_labels[:1000]

# The network expects an input of batch_size x n_channels x height x width
# n_channels in our case is 1. For RGB images, it is 3.
print(train_data.shape)

# Preprocess your images if you want
# train_data = preprocess_data()

# The Lenet class is written in model.py.
model = Lenet()
# Initialise a loss function.
# e.g. if we wanted an MSE loss: loss_fn = nn.MSELoss()
# The PyTorch cross-entropy loss is nn.CrossEntropyLoss (see the docs)
loss_fn = nn.CrossEntropyLoss()

# Activate gradients for the input data
# x = torch.from_numpy(x)
# x = x.requires_grad_(True)
train_data = torch.from_numpy(train_data).float()
test_data = torch.from_numpy(test_data).float()

train_data = train_data.requires_grad_(True)
test_data = test_data.requires_grad_(True)

# If we're planning to use cross entropy loss, the data type of the
# labels needs to be torch.long (see Example #7).
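The template stops just before the training loop; a minimal sketch of the full-batch loop it is building toward (the optimizer choice, learning rate and the train_labels name are assumptions):

optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)

for epoch in range(10):
    optimizer.zero_grad()
    outputs = model(train_data)            # forward pass on the whole batch
    loss = loss_fn(outputs, train_labels)  # cross-entropy needs long labels
    loss.backward()                        # backpropagate
    optimizer.step()                       # update the weights
    print("epoch {}: loss {:.4f}".format(epoch, loss.item()))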
Example #7
train_generator = train_datagen.flow(
    train_data,
    train_label,
    #target_size=(norm_size, norm_size),
    batch_size=BATCH_SIZE,
    #class_mode='sparse'
)
print(train_generator)
# validation_generator = test_datagen.flow_from_directory(
#         test_data,test_label,
#         target_size=(norm_size, norm_size),
#         batch_size=BATCH_SIZE,
#         class_mode='sparse')

model = Lenet.build(width=norm_size,
                    height=norm_size,
                    depth=3,
                    classes=CLASS_NUM)
adm = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)
model.compile(loss='categorical_crossentropy',
              optimizer=adm,
              metrics=['accuracy'])

board = TensorBoard(log_dir=log_path, histogram_freq=1)
H = model.fit_generator(
    train_generator,
    #steps_per_epoch=len(train_generator)/EPOCHS,
    epochs=EPOCHS,
    validation_data=(test_data, test_label),
    callbacks=[board])
model.save(model_path)
# A second, PyTorch variant of the same exercise follows.
D_in = 108 * 108
D_out = 6
gpu = 1

test_data = test_data[:1000]
test_labels = test_labels[:1000]

# The network expects an input of batch_size x (height * width)
# n_channels in our case is 1. For RGB images, it is 3.

# Preprocess your images if you want
# train_data = preprocess_data()

# Lenet is written in model.py
model = Lenet()

# Converting inputs and labels into cuda (gpu) enabled torch 'Variables'.
train_data = torch.from_numpy(train_data).float().requires_grad_(True)
test_data = torch.from_numpy(test_data).float().requires_grad_(True)
train_labels = torch.from_numpy(train_labels).long()
test_labels = torch.from_numpy(test_labels).long()

train_data = train_data.cuda()
test_data = test_data.cuda()

train_labels = train_labels.cuda()
test_labels = test_labels.cuda()

# Converting the entire data into cuda variable is NOT a good practice.
# We're still able to do it here because our data is small and can fit
# in GPU memory.
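As that last comment warns, the more scalable pattern is to keep the dataset on the CPU and move one mini-batch at a time; a sketch using a DataLoader with the names from the snippet above:

from torch.utils.data import TensorDataset, DataLoader

train_set = TensorDataset(train_data.detach().cpu(), train_labels.cpu())
train_loader = DataLoader(train_set, batch_size=64, shuffle=True)

for batch_data, batch_labels in train_loader:
    # Only the current mini-batch occupies GPU memory
    batch_data = batch_data.cuda()
    batch_labels = batch_labels.cuda()
    # ...forward / backward / step as usual...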