Example #1
0
def get_network(model, channel, num_classes, im_size=(32, 32)):
    """Instantiate a network by name and move it to the available device.

    Args:
        model: architecture name — one of the simple models ('MLP', 'LeNet',
            'AlexNet', 'VGG11', 'VGG11BN', 'ResNet18', 'ResNet18BN_AP'),
            the base 'ConvNet', or a ConvNet variant that overrides one
            default setting (depth 'ConvNetD1'-'ConvNetD4', width
            'ConvNetW32'-'ConvNetW256', activation 'ConvNetAS'/'AR'/'AL',
            normalization 'ConvNetNN'/'BN'/'LN'/'IN'/'GN', pooling
            'ConvNetNP'/'MP'/'AP').
        channel: number of input image channels.
        num_classes: number of output classes.
        im_size: input spatial size, forwarded to ConvNet-family models.

    Returns:
        The constructed module, wrapped in ``nn.DataParallel`` when more than
        one GPU is visible, moved to 'cuda' when available, else 'cpu'.

    Note:
        Exits the process (``SystemExit``) on an unknown model name, matching
        the original behavior.

    Fix: the ConvNet variants previously omitted ``im_size`` while the base
    'ConvNet' branch passed it, so a caller's non-default ``im_size`` was
    silently ignored for every variant; it is now forwarded uniformly.
    """
    # Re-seed so repeated calls yield differently-initialized networks.
    torch.random.manual_seed(int(time.time() * 1000) % 100000)
    net_width, net_depth, net_act, net_norm, net_pooling = get_default_convnet_setting(
    )

    # Models that only take (channel, num_classes).
    simple_models = {
        'MLP': MLP,
        'LeNet': LeNet,
        'AlexNet': AlexNet,
        'VGG11': VGG11,
        'VGG11BN': VGG11BN,
        'ResNet18': ResNet18,
        'ResNet18BN_AP': ResNet18BN_AP,
    }

    # Per-variant keyword overrides; unlisted settings fall back to the
    # defaults fetched above.
    convnet_overrides = {
        'ConvNet': {},
        'ConvNetD1': {'net_depth': 1},
        'ConvNetD2': {'net_depth': 2},
        'ConvNetD3': {'net_depth': 3},
        'ConvNetD4': {'net_depth': 4},
        'ConvNetW32': {'net_width': 32},
        'ConvNetW64': {'net_width': 64},
        'ConvNetW128': {'net_width': 128},
        'ConvNetW256': {'net_width': 256},
        'ConvNetAS': {'net_act': 'sigmoid'},
        'ConvNetAR': {'net_act': 'relu'},
        'ConvNetAL': {'net_act': 'leakyrelu'},
        'ConvNetNN': {'net_norm': 'none'},
        'ConvNetBN': {'net_norm': 'batchnorm'},
        'ConvNetLN': {'net_norm': 'layernorm'},
        'ConvNetIN': {'net_norm': 'instancenorm'},
        'ConvNetGN': {'net_norm': 'groupnorm'},
        'ConvNetNP': {'net_pooling': 'none'},
        'ConvNetMP': {'net_pooling': 'maxpooling'},
        'ConvNetAP': {'net_pooling': 'avgpooling'},
    }

    if model in simple_models:
        net = simple_models[model](channel=channel, num_classes=num_classes)
    elif model in convnet_overrides:
        kwargs = dict(net_width=net_width,
                      net_depth=net_depth,
                      net_act=net_act,
                      net_norm=net_norm,
                      net_pooling=net_pooling,
                      im_size=im_size)
        kwargs.update(convnet_overrides[model])
        net = ConvNet(channel=channel, num_classes=num_classes, **kwargs)
    else:
        net = None
        exit('DC error: unknown model')

    # Multi-GPU: wrap in DataParallel; single GPU: plain cuda; else CPU.
    gpu_num = torch.cuda.device_count()
    if gpu_num > 0:
        device = 'cuda'
        if gpu_num > 1:
            net = nn.DataParallel(net)
    else:
        device = 'cpu'
    net = net.to(device)

    return net
Example #2
0
# NOTE(review): this `else` closes an algorithm-selection if/elif chain that
# begins above this excerpt.
else:
    raise ValueError("Invalid algorithm")

# Load the full dataset once; `utility.read_data` and DATASET are defined
# outside this excerpt.
data, labels = utility.read_data(DATASET)

# Generator configuration; all UPPER_CASE values are module-level constants
# defined outside this excerpt.
params = {
    'input_size': INPUT_SIZE,
    'output_size': OUTPUT_SIZE,
    'input_channels': INPUT_CHANNELS,
    # NOTE(review): 'OUTPUT_CAHNNELS' looks like a typo for OUTPUT_CHANNELS —
    # confirm the constant really is declared under this misspelled name
    # before renaming it anywhere.
    'output_channels': OUTPUT_CAHNNELS,
    'num_classes': NUM_CLASSES,
    'batch_size': BATCH_SIZE,
    'processing_classes': PROCESSING_CLASSES,
    'output_function': OUTPUT_FUNCTION,
    'shuffle': False  # keep order stable so `features` stays aligned with `labels`
}

generator = DataGenerator(data, **params)

# Input shape is spatial size plus a trailing channel axis (channels-last).
base_model = AlexNet(INPUT_SIZE+(len(INPUT_CHANNELS), ), NUM_CLASSES)

base_model.load_weights(args.weights)

# Feature extractor: global-average-pool the activations of layer 'act4'.
model = Model(inputs = base_model.input, outputs = GlobalAveragePooling2D()(base_model.get_layer('act4').output))

features = model.predict_generator(generator = generator, use_multiprocessing = True, workers = 8, verbose = 1)

# Persist features and labels together in one .npz archive.
with open(args.features, 'wb') as f:
    np.savez(f, features, labels)

Example #3
0
def main(**kwargs):
    """Train and evaluate an EWC classifier over a sequence of tasks.

    Each entry of the ``task`` list is a dataset name ('mnist' or 'fashion')
    prefixed with '+' (fit on it, then consolidate Fisher information) or
    '-' (score on it). Defaults are filled in for any keyword the caller
    omits; see the ``setdefault`` calls for the full list.

    Fix: the task loop previously read ``args.tasks`` while the default was
    registered under the key ``'task'``, so any caller relying on the default
    hit an AttributeError. The loop now reads ``args.task``.
    """
    kwargs.setdefault('data_size', 500)
    kwargs.setdefault('epochs', 600)
    kwargs.setdefault('learning_rate', 0.001)
    kwargs.setdefault('patience', None)
    kwargs.setdefault('ewc', 0)
    kwargs.setdefault('batch_size', 128)
    kwargs.setdefault('cuda', None)
    kwargs.setdefault('dry_run', False)
    kwargs.setdefault('name', None)
    kwargs.setdefault('seed', 1337)
    kwargs.setdefault('verbose', 'WARN')
    kwargs.setdefault('task', ['+mnist', '-mnist'])
    args = SimpleNamespace(**kwargs)

    logging.basicConfig(
        level=args.verbose,
        style='{',
        format='[{levelname:.4}][{asctime}][{name}:{lineno}] {msg}',
    )

    logger.debug('parameters of this experiment')
    for key, val in args.__dict__.items():
        logger.debug(f' {key:.15}: {val}')

    seed(args.seed)

    datasets = {
        'mnist': MNIST(),
        'fashion': FashionMNIST(),
    }

    # Auto-generate a unique experiment name from the current timestamp.
    if args.name is None:
        now = np.datetime64('now')
        args.name = f'exp-{now}'
        logger.info(f'experiment name not given, defaulting to {args.name}')

    # In some cases, we must move the network to its cuda device before
    # constructing the optimizer. This is annoying, and this logic is
    # duplicated in the estimator class. Ideally, I'd like the estimator to
    # handle cuda allocation _after_ the optimizer has been constructed...
    net = AlexNet(10, shape=(1, 27, 27))
    if args.cuda is None:
        # Device 0 if CUDA exists, else False (sentinel for "stay on CPU").
        args.cuda = 0 if torch.cuda.is_available() else False
    if args.cuda is not False:
        net = net.cuda(args.cuda)

    opt = O.Adagrad(net.parameters(),
                    lr=args.learning_rate,
                    weight_decay=0.004)
    loss = N.CrossEntropyLoss()
    model = EwcClassifier(net,
                          opt,
                          loss,
                          name=args.name,
                          cuda=args.cuda,
                          dry_run=args.dry_run)

    # BUG FIX: was `args.tasks`; the default above is stored under 'task'.
    for task in args.task:
        data = datasets[task[1:]]
        train, test = data.load()

        if task[0] == '+':
            print(f'-------- Fitting {task[1:]} --------')
            model.fit(train,
                      epochs=args.epochs,
                      patience=args.patience,
                      batch_size=args.batch_size)
            model.consolidate(train,
                              alpha=args.ewc,
                              batch_size=args.batch_size)
            print()

        if task[0] == '-':
            print(f'-------- Scoring {task[1:]} --------')
            scores = {
                'accuracy': Accuracy(),
                'true positives': TruePositives(),
                'false positives': FalsePositives(),
                'true negatives': TrueNegatives(),
                'false negatives': FalseNegatives(),
                'precision': Precision(),
                'recall': Recall(),
                'f-score': FScore(),
            }
            for metric, criteria in scores.items():
                score = model.test(test, criteria, batch_size=args.batch_size)
                print(f'{metric:15}: {score}')
            print()
# PIL-to-tensor transform; not referenced within this excerpt — presumably
# used by training code further down. TODO confirm.
pil_to_tensor = transforms.ToTensor()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

#-------------------------------------

# Randomly-initialized ResNet-50 whose output dimension equals the number of
# k-means clusters (cluster ids act as classification targets).
scratch_model = nn.Sequential(
    resnet.resnet50(pretrained=False, num_classes=args.kmeans),
)

""" training """
# Paths for training images, k-means pseudo-labels, and masks; all rooted at
# ROOT, which is defined outside this excerpt.
train_path = f"{ ROOT }/dataset/{ args.data }/train_resize/good"
label_name = f"{ ROOT }/preprocessData/label/fullPatch/RoNet/{ args.data }/kmeans_{ args.kmeans }.pth"
mask_path  = f"{ ROOT }/dataset/big_mask/"

""" Load part of pretrained model """
# Rotation-pretrained AlexNet (4 rotation classes); the checkpoint stores the
# state dict under the 'network' key.
pretrain_model = AlexNet.AlexNet({'num_classes': 4})
pretrain_model.load_state_dict(torch.load(f"{ ROOT }/models/RoNet/model_net_epoch50")['network'])
pretrain_model = pretrain_model.to(device) # nn.DataParallel(pretrain_model).to(device)

print('training data: ', train_path)
print('training label: ', label_name)

""" testing """
# 'good' (defect-free) test images have no ground-truth masks; any other
# defect type additionally gets a ground-truth directory.
if (args.type == 'good'):
    test_path           = f"{ ROOT }/dataset/{ args.data }/test_resize/good"
    test_label_name     = f"{ ROOT }/preprocessData/label/RoNet/{ args.data }/test/good_{ args.kmeans }.pth"
    all_test_label_name = f"{ ROOT }/preprocessData/label/RoNet/{ args.data }/test/all_{ args.kmeans}.pth"
else:
    test_path           = f"{ ROOT }/dataset/{ args.data }/test_resize/{ args.type }"
    defect_gt_path      = f"{ ROOT }/dataset/{ args.data }/ground_truth_resize/{ args.type }"
    test_label_name     = f"{ ROOT }/preprocessData/label/RoNet/{ args.data }/test/{ args.type }_{ args.kmeans }.pth"
# Generator configuration; UPPER_CASE values are module-level constants
# defined outside this excerpt.
params = {
    'input_size': INPUT_SIZE,
    'output_size': OUTPUT_SIZE,
    'input_channels': INPUT_CHANNELS,
    # NOTE(review): 'OUTPUT_CAHNNELS' looks like a typo for OUTPUT_CHANNELS —
    # confirm the constant is actually declared under this misspelled name
    # before renaming it anywhere.
    'output_channels': OUTPUT_CAHNNELS,
    'num_classes': NUM_CLASSES,
    'batch_size': BATCH_SIZE,
    'processing_classes': PROCESSING_CLASSES,
    'output_function': OUTPUT_FUNCTION
}

train_generator = DataGenerator(train_data, **params)
validation_generator = DataGenerator(val_data, **params)

# Channels-last input shape: spatial size plus a trailing channel axis.
model = AlexNet(INPUT_SIZE + (len(INPUT_CHANNELS), ), NUM_CLASSES, INITIALIZER)

model.summary()

optimizer = Adam(lr=1e-4)
# Keras training callbacks: TensorBoard logging and LR reduction when the
# validation loss plateaus.
tensorboard = TensorBoard(log_dir='./Graph',
                          histogram_freq=0,
                          write_grads=False,
                          batch_size=BATCH_SIZE,
                          write_images=False)
reduce_on_plateau = ReduceLROnPlateau(monitor='val_loss',
                                      factor=0.3,
                                      patience=5,
                                      mode='min',
                                      min_delta=1e-4)
early_stopping = EarlyStopping(monitor='val_loss',
Example #6
0
def train(args):
    """Train AlexNet from scratch (or resume from a checkpoint) on an
    image-folder dataset, saving a full-model snapshot every 5 epochs.

    Expected ``args`` attributes (confirm against the CLI parser, outside
    this excerpt): dataset, img_rows, batch_size, model_path, gpu, l_rate,
    n_epoch, arch, save_folder.

    NOTE(review): this function uses legacy PyTorch APIs throughout
    (``transforms.Scale``, ``Variable``, ``loss.data[0]``); it only runs on
    pre-0.4 PyTorch — confirm the project's pinned version before touching.
    """
    ############################################
    # Setup Dataloader
    data_path = get_data_path(args.dataset)
    # Scale to 2x the crop size, then random-crop back down for augmentation.
    dataset = dset.ImageFolder(root=data_path,
                               transform=transforms.Compose([
                                   transforms.Scale(2*args.img_rows),
                                   transforms.RandomCrop(args.img_rows),
                                   transforms.RandomHorizontalFlip(),
                                   transforms.ToTensor(),
                                   transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
                               ]))

    n_classes = len(dataset.classes)
    dataloader = data.DataLoader(dataset, batch_size=args.batch_size, num_workers=4, shuffle=True)
    ############################################

    ######## pretrained = FALSE ===>>> NO PRE-TRAINING ########
    ############################################
    # Setup Model
    """
    model = models.alexnet(pretrained=False)
    block = list(model.classifier.children())
    old_layer = block.pop()
    num_feats = old_layer.in_features
    block.append(nn.Linear(num_feats, n_classes))
    model.classifier = nn.Sequential(*block)
    """
    model = AlexNet(num_classes=n_classes)
    ############################################

    ############################################
    # Random weight initialization
    model.apply(weights_init)
    ############################################

    ############################################
    # If resuming the training from a saved model
    # NOTE(review): this replaces the freshly built model entirely, so the
    # weight init above is discarded when resuming — apparently intentional.
    if args.model_path != '':
        print('\n' + '-' * 40)
        model = torch.load(args.model_path)
        print('Restored the trained network {}'.format(args.model_path))
    ############################################


    ############################################
    criterion = nn.CrossEntropyLoss() # Loss criterion
    # Porting the networks to CUDA
    if torch.cuda.is_available():
        model.cuda(args.gpu)
        criterion.cuda(args.gpu)
    ############################################

    ############################################
    # Defining the optimizer over the network parameters
    # (features and classifier get separate parameter groups; the classifier
    # group's lr is 1*l_rate, i.e. currently identical to the base lr).
    optimizerSS = torch.optim.SGD([{'params': model.features.parameters()},
                                        {'params': model.classifier.parameters(), 'lr':1*args.l_rate}],
                                  lr=args.l_rate, momentum=0.9, weight_decay=5e-4)
    # Snapshot of the initial optimizer state; presumably used by
    # adjust_learning_rate_v2 to derive the stepped schedule — TODO confirm.
    optimizerSS_init = copy.deepcopy(optimizerSS)
    ############################################

    ############################################
    # TRAINING:
    for epoch in range(args.n_epoch):
        for i, (images, labels) in enumerate(dataloader):
            ######################
            # Porting the data to Autograd variables and CUDA (if available)
            if torch.cuda.is_available():
                images = Variable(images.cuda(args.gpu))
                labels = Variable(labels.cuda(args.gpu))
            else:
                images = Variable(images)
                labels = Variable(labels)

            ######################
            # Scheduling the learning rate
            #adjust_learning_rate(optimizerSS, args.l_rate, epoch)
            adjust_learning_rate_v2(optimizerSS, optimizerSS_init, epoch, step=20)

            ######################
            # Setting the gradients to zero at each iteration
            optimizerSS.zero_grad()
            model.zero_grad()

            ######################
            # Passing the data through the network
            outputs = model(images)

            ######################
            # Computing the loss and doing back-propagation
            loss = criterion(outputs, labels)
            loss.backward()

            ######################
            # Updating the parameters
            optimizerSS.step()

            # Log every 20 iterations. NOTE(review): `loss.data[0]` is the
            # pre-0.4 idiom; on modern PyTorch this raises — use loss.item().
            if (i+1) % 20 == 0:
                print("Iter [%d/%d], Epoch [%d/%d] Loss: %.4f" % (i+1, len(dataloader), epoch+1, args.n_epoch, loss.data[0]))

        # test_output = model(test_image)
        # predicted = loader.decode_segmap(test_output[0].cpu().data.numpy().argmax(0))
        # target = loader.decode_segmap(test_segmap.numpy())

        # vis.image(test_image[0].cpu().data.numpy(), opts=dict(title='Input' + str(epoch)))
        # vis.image(np.transpose(target, [2,0,1]), opts=dict(title='GT' + str(epoch)))
        # vis.image(np.transpose(predicted, [2,0,1]), opts=dict(title='Predicted' + str(epoch)))
        # Checkpoint the whole model object (not just state_dict) every 5 epochs;
        # the filename records whether this run resumed from a prior model.
        if (epoch+1) % 5 == 0:
            if args.model_path != '':
                torch.save(model, "./{}/double_scale_model_{}_{}_{}_from_{}.pkl" .format(args.save_folder, args.arch, args.dataset, epoch, args.model_path))
            else:
                torch.save(model, "./{}/double_scale_model_{}_{}_{}.pkl".format(args.save_folder, args.arch, args.dataset, epoch))