Code Example #1
import torch
import torch.nn as nn

# model_0 (which defines Model) and train_loader are assumed to be
# provided elsewhere in the project.


def train_model():
    # Device configuration: run on the GPU when one is available.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Hyper-parameters
    num_of_samples = 50
    input_size = 1
    num_classes = 2
    num_epochs = 30
    batch_size = 10
    learning_rate = 0.1

    # Build the model and move it to the target device; pre-trained
    # parameters are loaded below if a checkpoint file exists.
    model = model_0.Model(input_size, num_classes).to(device)

    # Load model parameters from a previous run, if available.
    try:
        with open('model_0_parameters', 'rb') as f:
            print('Retrieving data...')
            model.load_state_dict(torch.load(f, map_location=device))
            print('Data loaded.')
    except FileNotFoundError:
        print('Data file does not exist.')

    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

    total_steps = num_of_samples // batch_size
    for epoch in range(num_epochs):
        for i, (data, labels) in enumerate(
                train_loader(num_of_samples, batch_size)):
            # Flatten each sample into a feature vector and move the
            # batch to the target device.
            data = data.reshape(batch_size, -1).to(device)
            labels = labels.to(device)
            # Forward pass: compute predictions and evaluate the loss.
            outputs = model(data)
            loss = criterion(outputs, labels)
            # Backward pass: zero_grad() clears the gradients accumulated
            # from the previous mini-batch before new ones are computed,
            # then the optimizer updates the weights.
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # Display progress at the end of each epoch.
            if (i + 1) == total_steps:
                print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'.format(
                    epoch + 1, num_epochs, i + 1, total_steps, loss.item()))

    # Save model parameters.
    with open('model_0_parameters', 'wb') as f:
        print('Saving data...')
        torch.save(model.state_dict(), f)
        print('Data saved.')
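
The snippet above assumes a train_loader(num_of_samples, batch_size) helper that yields (data, labels) mini-batches; the real loader is not part of this excerpt. A minimal sketch of such a generator over synthetic data, matching the input_size=1, num_classes=2 hyper-parameters above, might look like this:

import torch


def train_loader(num_of_samples, batch_size):
    # Hypothetical stand-in for the loader used above: synthetic 1-D
    # samples, labeled class 1 for positive values and class 0 otherwise.
    data = torch.randn(num_of_samples, 1)
    labels = (data.squeeze(1) > 0).long()
    for start in range(0, num_of_samples, batch_size):
        yield data[start:start + batch_size], labels[start:start + batch_size]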
Code Example #2
    def __init__(self, train_path, test_path, model):
        # Data loaders for the training and test sets (dataset is a
        # project-local module).
        self.train_data_loader = dataset.train_loader(train_path)
        self.test_data_loader = dataset.test_loader(test_path)

        self.model = model
        self.criterion = torch.nn.CrossEntropyLoss()
        # FilterPrunner ranks convolutional filters for pruning.
        self.prunner = FilterPrunner(self.model)
        # Keep the model in training mode so fine-tuning updates
        # batch-norm statistics and applies dropout.
        self.model.train()
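
FilterPrunner itself is not part of this excerpt. A hypothetical skeleton of what such a helper typically holds, assuming it accumulates a rank per convolutional filter so that the lowest-ranked filters can be pruned (the names and structure here are illustrative, not the project's actual implementation):

class FilterPrunner:
    # Hypothetical skeleton: accumulates a rank per Conv2d filter so the
    # lowest-ranked filters can be selected for pruning.

    def __init__(self, model):
        self.model = model
        self.reset()

    def reset(self):
        # Per-layer tensors of filter ranks, filled in during forward passes.
        self.filter_ranks = {}

    def lowest_ranking_filters(self, num):
        # Flatten (layer, filter_index, rank) triples and return the
        # `num` smallest by rank.
        data = [(layer, i, float(ranks[i]))
                for layer, ranks in self.filter_ranks.items()
                for i in range(len(ranks))]
        return sorted(data, key=lambda t: t[2])[:num]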
Code Example #3
    def __init__(self, train_path, model, log_dir=None, ctx=mx.cpu()):
        self.train_data_loader = dataset.train_loader(train_path,
                                                      batch_size=128)
        # Only the validation split is used here; the train and test
        # splits returned by this call are discarded.
        _, self.valid_data_loader, _ = dataset.train_valid_test_loader(
            train_path, (0.9, 0.05), batch_size=64)
        self.model = model
        self.ctx = ctx
        self.criterion = models.AngleLoss()
        self.log_dir = log_dir
        self.p = Printer(log_dir)
        # Note: this assumes log_dir is not None despite the default.
        self.model_save_path = os.path.join(self.log_dir, "model")
        self.model_saved = False
        self.device_id = 6
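
In the constructor above, the tuple (0.9, 0.05) apparently gives the train and validation fractions, with the remainder left for test. The dataset module is not shown; a minimal sketch of such a three-way split helper might look like the following (the original appears to be MXNet/Gluon-based, but the same logic is shown here with PyTorch utilities for brevity, and the ImageFolder choice is an assumption):

import torch
from torch.utils.data import DataLoader, random_split
from torchvision import datasets, transforms


def train_valid_test_loader(path, fractions, batch_size):
    # Hypothetical three-way split: `fractions` gives the train and
    # validation shares; whatever remains becomes the test set.
    data = datasets.ImageFolder(path, transform=transforms.ToTensor())
    n = len(data)
    n_train = int(fractions[0] * n)
    n_valid = int(fractions[1] * n)
    n_test = n - n_train - n_valid
    train_set, valid_set, test_set = random_split(
        data, [n_train, n_valid, n_test])
    return (DataLoader(train_set, batch_size=batch_size, shuffle=True),
            DataLoader(valid_set, batch_size=batch_size),
            DataLoader(test_set, batch_size=batch_size))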
Code Example #4
    # Optionally resume from a checkpoint that was saved under
    # nn.DataParallel, stripping the 'module.' prefix from each key:
    # state_dict = torch.load('mobilenetv2_9.pth')
    # for k in list(state_dict.keys()):
    #     if 'module' in k:
    #         state_dict[k.replace('module.', '')] = state_dict[k]
    #         del state_dict[k]
    #
    # net.load_state_dict(state_dict)

    # Initialize the weights, move the network to the GPU, and wrap it
    # for multi-GPU training.
    weights_normal_init(net)
    net = net.cuda()
    net = torch.nn.DataParallel(net)
    # Let cuDNN benchmark and pick the fastest convolution algorithms.
    cudnn.benchmark = True

    # Data loaders and a logger (train_loader, val_loader, and Logger
    # are provided elsewhere in the project).
    train_dataset = train_loader()
    val_dataset = val_loader()
    train_logger = Logger('./logs')

    lr = 0.05

    # Train for 10 epochs, decaying the learning rate by 20% after each
    # epoch and evaluating on the validation set.
    for epoch in range(10):
        train(epoch, net, train_dataset, lr, train_logger)
        lr *= 0.8
        test(epoch, net, val_dataset, train_logger)

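
weights_normal_init is not defined in this excerpt. In the detection-style codebases this pattern usually comes from, it fills convolutional and linear weights from a small-variance normal distribution; a sketch under that assumption (the function body and the dev default are illustrative):

import torch.nn as nn


def weights_normal_init(model, dev=0.01):
    # Hypothetical helper: initialize Conv2d/Linear weights from
    # N(0, dev^2) and zero the biases.
    for m in model.modules():
        if isinstance(m, (nn.Conv2d, nn.Linear)):
            m.weight.data.normal_(0.0, dev)
            if m.bias is not None:
                m.bias.data.zero_()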
Code Example #5
        x = self.classifier(x)
        # Flatten to (batch_size, features) before the final linear layer.
        x = x.view(x.size(0), -1)
        x = self.linear(x)

        return x


# NNI pruning configuration: target 50% sparsity in every Conv2d layer.
config_list = [{'sparsity': 0.5, 'op_types': ['Conv2d']}]
pretrain_epochs = 1
prune_epochs = 1
device = 'cuda'
train_path = './train'
test_path = './test'

train_data_loader = dataset.train_loader(train_path)
test_data_loader = dataset.test_loader(test_path)

criterion = torch.nn.CrossEntropyLoss()


def train(model, device, train_loader, optimizer):
    # One epoch of standard supervised training using the shared criterion.
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()
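
The config_list above follows NNI's model-compression format. A minimal sketch of how it might drive a pruner, assuming NNI 2.x and its L1NormPruner (the excerpt does not show which pruner class the project actually uses, and Net is a hypothetical model class standing in for the one whose forward() is excerpted above):

import torch
from nni.compression.pytorch.pruning import L1NormPruner

model = Net().to(device)  # Net is hypothetical; see the note above
optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)

# Briefly train the dense model before pruning.
for _ in range(pretrain_epochs):
    train(model, device, train_data_loader, optimizer)

# Apply the pruning configuration; masks are applied to the model in place.
pruner = L1NormPruner(model, config_list)
_, masks = pruner.compress()

# Fine-tune the masked model to recover accuracy.
for _ in range(prune_epochs):
    train(model, device, train_data_loader, optimizer)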