Example #1
def test1():
    dataset = ImageDataset.get_cifar10()

    model = SwapModel()

    optimizer = tf.keras.optimizers.Adam(0.001)
    model.compile(optimizer=optimizer, loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=['accuracy'])
    model.fit(dataset.train_images, dataset.train_labels, epochs=1)

    model.save('test_save')

    model.evaluate(dataset.test_images, dataset.test_labels)

    model.summary()
    model.swap()
    model.compile(optimizer=optimizer, loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=['accuracy'])
    model.summary()
    print(model.layers)

    # Reload the model saved before the swap
    model = tf.keras.models.load_model('test_save')
    model.summary()

    model.evaluate(dataset.test_images, dataset.test_labels)
    model.fit(dataset.train_images, dataset.train_labels, epochs=1)
    model.evaluate(dataset.test_images, dataset.test_labels)
Example #2
def test2():
    dataset = ImageDataset.get_build_set()

    model_holder = ModelHolder()

    model_input = tf.keras.Input([16, 16, 3])
    keras_model = model_holder.build(model_input)

    print(model_holder.lay2.get_weights())

    model_holder.swap()
    keras_model = model_holder.build(model_input)

    print(model_holder.lay2.get_weights())

    # model_output = keras_model(model_input)
    # temp_model = tf.keras.Model(inputs=model_input, outputs=model_output)

    keras_model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True))

    # logdir = os.path.join(tensorboard_dir, 'sandbox_test_' + str(time.time()))
    # writer = tf.summary.create_file_writer(logdir)
    # tf.summary.trace_on(graph=True, profiler=False)

    keras_model.fit(dataset.train_images, dataset.train_labels, epochs=1)

    tf.keras.utils.plot_model(keras_model, 'model_image.png', expand_nested=True, show_layer_names=True, show_shapes=True)
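
The commented-out lines above sketch a TensorBoard graph trace; a minimal working version with the TF2 summary API, assuming `os` and `time` are imported and using a hypothetical local `logs` directory:

    # Sketch: capture and export the traced graph for TensorBoard.
    logdir = os.path.join('logs', 'sandbox_test_' + str(time.time()))
    writer = tf.summary.create_file_writer(logdir)

    @tf.function
    def traced_call(x):
        return keras_model(x)

    tf.summary.trace_on(graph=True)
    traced_call(tf.zeros([1, 16, 16, 3]))  # run once inside tf.function so the graph is recorded
    with writer.as_default():
        tf.summary.trace_export(name='sandbox_trace', step=0)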
Example #3
def verify_mutations():
    params = Hyperparameters()
    params.parameters['TRAIN_ITERATIONS'] = 1
    params.parameters['REDUCTION_EXPANSION_FACTOR'] = 2

    dataset = ImageDataset.get_cifar10_reduced()

    for i in range(50):
        model = MetaModel(params)
        model.populate_with_nasnet_metacells()
        model.build_model(dataset.images_shape)

        model.mutate()
        model.mutate()
        model.mutate()
        model.clear_model()
        tf.keras.backend.clear_session()  # free graph state between iterations
Example #4
def verify_load():
    dir_path = os.path.join(evo_dir, 'test_load_v2')
    if not os.path.exists(dir_path):
        os.makedirs(dir_path)
    params = Hyperparameters()
    params.parameters['TRAIN_ITERATIONS'] = 1
    params.parameters['REDUCTION_EXPANSION_FACTOR'] = 2

    dataset = ImageDataset.get_cifar10_reduced()

    for i in range(50):
        model = MetaModel(params)
        model.populate_with_nasnet_metacells()
        model.build_model(dataset.images_shape)
        model.save_model(dir_path)
        model.save_metadata(dir_path)
        model.clear_model()
        tf.keras.backend.clear_session()

        other_model = MetaModel.load(dir_path, model.model_name, True)  # reload to verify the round trip

        tf.keras.backend.clear_session()
Example #5
    device = torch.device("cuda:0" if use_cuda else "cpu")
    torch.backends.cudnn.benchmark = True

    # Load annotations
    annotations_df = pd.read_csv('annotations.csv')

    # Debug
    #annotations_df = annotations_df.iloc[0:100]

    # Dataloader
    datasets = {}
    dataloaders = {}
    dataset_sizes = {}
    for split in ['valid']:
        datasets[split] = ImageDataset(df=annotations_df,
                                       split=split,
                                       im_size=299)

        dataloaders[split] = DataLoader(datasets[split],
                                        batch_size=9,
                                        shuffle=(split != 'test'),
                                        num_workers=4,
                                        pin_memory=True)

        dataset_sizes[split] = len(datasets[split])

    model = FamilyNet(n_classes=4).to(device)
    model.load_state_dict(torch.load('FamilyNet.pth'))

    torch.set_grad_enabled(False)
    model.train(False)
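
With gradients disabled and the model in eval mode, an inference pass is the natural next step; a minimal sketch, assuming each batch yields (images, labels) and the model returns one logit per class:

    # Sketch: collect class predictions over the validation split.
    all_preds = []
    for images, labels in dataloaders['valid']:
        outputs = model(images.to(device))        # (batch, n_classes) logits
        all_preds.append(outputs.argmax(dim=1).cpu())
    all_preds = torch.cat(all_preds)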
Example #6
    PATH = './model.pt'

    if torch.cuda.is_available():
        device = torch.device('cuda:0')
    else:
        device = torch.device('cpu')

    # model = AlexNet_model().to(device)
    model = Resnet_model().to(device)
    model.load_state_dict(torch.load(PATH))
    model.eval()  # Set model to evaluate mode

    transform = Rescale((224, 224))

    image_datasets = {
        x: ImageDataset(data_use=x, transform=transform)
        for x in ['train', 'val']
    }
    dataloaders_dict = {
        x: DataLoader(image_datasets[x],
                      batch_size=10,
                      shuffle=False,
                      num_workers=0)
        for x in ['train', 'val']
    }

    Res_dict = {'train': {}, 'val': {}}
    Res_dict['train']['pred'] = np.zeros(800).astype(int)
    Res_dict['train']['bbox_pred'] = np.zeros((800, 4)).astype(int)
    Res_dict['val']['pred'] = np.zeros(100).astype(int)
    Res_dict['val']['bbox_pred'] = np.zeros((100, 4)).astype(int)
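
The preallocated arrays suggest an evaluation pass recording a class index and a 4-value box per image; a sketch under the (unconfirmed) assumption that the model returns (class_logits, boxes) and that images come first in each batch, with shuffle=False keeping dataset order:

    # Sketch: fill Res_dict with predictions in dataset order.
    for phase in ['train', 'val']:
        offset = 0
        for images, *rest in dataloaders_dict[phase]:
            class_logits, boxes = model(images.to(device))
            n = images.size(0)
            Res_dict[phase]['pred'][offset:offset + n] = class_logits.argmax(dim=1).cpu().numpy()
            Res_dict[phase]['bbox_pred'][offset:offset + n] = boxes.cpu().numpy().astype(int)
            offset += n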
Example #7
def train(opt):

    netG_A2B = Unet2(3, 3)
    netG_B2A = Unet2(3, 3)
    netD_A = Discriminator(3)
    netD_B = Discriminator(3)

    if opt.use_cuda:
        netG_A2B = netG_A2B.cuda()
        netG_B2A = netG_B2A.cuda()
        netD_A = netD_A.cuda()
        netD_B = netD_B.cuda()

    netG_A2B_optimizer = optimizer.Adam(params=netG_A2B.parameters(),
                                        lr=opt.lr,
                                        betas=(0.5, 0.999))
    netG_B2A_optimizer = optimizer.Adam(params=netG_B2A.parameters(),
                                        lr=opt.lr,
                                        betas=(0.5, 0.999))
    netD_A_optimizer = optimizer.Adam(params=netD_A.parameters(),
                                      lr=opt.lr,
                                      betas=(0.5, 0.999))
    netD_B_optimizer = optimizer.Adam(params=netD_B.parameters(),
                                      lr=opt.lr,
                                      betas=(0.5, 0.999))

    optimizers = dict()
    optimizers['G1'] = netG_A2B_optimizer
    optimizers['G2'] = netG_B2A_optimizer
    optimizers['D1'] = netD_A_optimizer
    optimizers['D2'] = netD_B_optimizer

    # Dataset loader
    transforms_ = [
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ]

    train_dataloader = DataLoader(ImageDataset(opt.dataroot,
                                               transforms_=transforms_,
                                               unaligned=True),
                                  batch_size=opt.batchSize,
                                  shuffle=True)

    # TensorBoard summary writer
    writer = SummaryWriter(opt.log_dir)

    for epoch in range(opt.n_epochs):
        for ii, batch in enumerate(train_dataloader):
            # Set model input
            real_A = Variable(batch['A'])
            real_B = Variable(batch['B'])

            if opt.use_cuda:
                real_A = real_A.cuda()
                real_B = real_B.cuda()

            train_one_step(use_cuda=opt.use_cuda,
                           netG_A2B=netG_A2B,
                           netG_B2A=netG_B2A,
                           netD_A=netD_A,
                           netD_B=netD_B,
                           real_A=real_A,
                           real_B=real_B,
                           optimizers=optimizers,
                           iteration=ii,
                           writer=writer)

            print("\nEpoch: %s Batch: %s" % (epoch, ii))

    writer.export_scalars_to_json("./all_scalars.json")
    writer.close()
    torch.save(netG_A2B.state_dict(), os.path.join(opt.save_dir, 'netG_A2B'))
    torch.save(netG_B2A.state_dict(), os.path.join(opt.save_dir, 'netG_B2A'))
    torch.save(netD_A.state_dict(), os.path.join(opt.save_dir, 'netD_A'))
    torch.save(netD_B.state_dict(), os.path.join(opt.save_dir, 'netD_B'))
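
To reuse the trained generators afterwards, the saved state dicts can be loaded back; a minimal sketch using the same file names the function saves under (assumes the same opt.save_dir):

# Sketch: reload the A->B generator for inference.
netG_A2B = Unet2(3, 3)
netG_A2B.load_state_dict(torch.load(os.path.join(opt.save_dir, 'netG_A2B')))
netG_A2B.eval()  # freeze dropout/batch-norm behavior for generation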
Example #8
import numpy as np
import torch.optim as optim
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
from torch.autograd import Variable
from Model import Net
from Dataset import ImageDataset

classes = ("Yellow", "Red", "Noise")

validation_split = .1
shuffle_dataset = True
random_seed = 42
batch_size = 16
dataset = ImageDataset(csv_file="./dataset.csv", root_dir="./../clusters")
dataset_size = len(dataset)
indices = list(range(dataset_size))
split = int(np.floor(validation_split * dataset_size))
if shuffle_dataset:
    np.random.seed(random_seed)
    np.random.shuffle(indices)
train_indices, val_indices = indices[split:], indices[:split]

train_sampler = SubsetRandomSampler(train_indices)
valid_sampler = SubsetRandomSampler(val_indices)

train_load = DataLoader(dataset,
                        batch_size=batch_size,
                        sampler=train_sampler)
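
The snippet stops before the validation loader; a sketch of the counterpart for the valid_sampler defined above, reusing the same batch size:

# Sketch: validation loader mirroring train_load.
validation_load = DataLoader(dataset,
                             batch_size=batch_size,
                             sampler=valid_sampler)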
Example #9
    # model = AlexNet_model().to(device)
    model = Resnet_model().to(device)
    init_lr = 0.01

    params_to_update = model.parameters()
    # optimizer = torch.optim.Adam(params_to_update, init_lr)
    optimizer = torch.optim.SGD(params_to_update, init_lr)

    transform_list = [
        RandomCrop(output_size=100),
        Rescale((224, 224)),
        ToTensor()
    ]

    image_datasets = {
        x: ImageDataset(root_dir, data_use=x, transform_list=transform_list)
        for x in ['train', 'val']
    }
    dataloaders_dict = {
        x: DataLoader(image_datasets[x],
                      batch_size=32,
                      shuffle=True,
                      num_workers=4)
        for x in ['train', 'val']
    }

    criterions = [nn.CrossEntropyLoss(), nn.MSELoss()]

    scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, [70, 90],
                                                     gamma=0.1)
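
Pairing a cross-entropy with an MSE criterion suggests the network emits class logits plus a regression output; a one-epoch training sketch under that assumption (the batch layout and model outputs are guesses, not confirmed by the snippet):

    # Sketch: combine both losses; assumes batches of (images, labels, boxes).
    for images, labels, boxes in dataloaders_dict['train']:
        images, labels, boxes = images.to(device), labels.to(device), boxes.to(device)
        optimizer.zero_grad()
        class_logits, box_pred = model(images)
        loss = criterions[0](class_logits, labels) + criterions[1](box_pred, boxes.float())
        loss.backward()
        optimizer.step()
    scheduler.step()  # advances the MultiStepLR decay (factor 0.1 at epochs 70 and 90)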
Example #10
import os
from Transformer import ImageTransformer
from Saver import DataSaver
from Dataset import ImageDataset
from DataHandler import DataHandler

data = os.listdir('/home/oem/train')

labels = [[1, 0] if 'cat' in i else [0, 1] for i in data]

dataset = ImageDataset(source='/home/oem/train',
                       labels=labels,
                       split_spec={'train': {'amount': 0.6, 'transform': True, 'batch_size': 32},
                                   'validation': {'amount': 0.2, 'transform': False, 'batch_size': 32},
                                   'test': {'amount': 0.2, 'transform': False, 'batch_size': 32}},
                       shuffle=True)

transformer = ImageTransformer()

transformer.add_resize((256, 256), origin=True, keys=['train', 'validation', 'test'])
transformer.add_grayscale(origin=True, keys=['train', 'validation', 'test'])
transformer.add_unsharp_masking(origin=True, keys=['train', 'validation', 'test'])
transformer.add_histogram_equalization(origin=True, keys=['train', 'validation', 'test'])
transformer.add_median_filter(keys=['train'])
transformer.add_rotation([45, 60, 90], keys=['train'])
transformer.add_contrast([1.5], keys=['train'])
transformer.add_brightening([0.5], keys=['train'])
transformer.add_sharpening([2.0], keys=['train'])

saver = DataSaver('/home/oem/PycharmProjects/DeepLearning', 'CatsvsDogs')
handler = DataHandler(dataset=dataset,
Example #11
        exit()
solver = None
if args.solver_type == 'SGD':
    solver = caffe.SGDSolver(solver_prototxt_path)
elif args.solver_type == 'ADADELTA':
    solver = caffe.AdaDeltaSolver(solver_prototxt_path)
elif args.solver_type == 'ADAM':
    solver = caffe.AdamSolver(solver_prototxt_path)

if args.use_snapshot is None:
    if pretrained_model_path != '':
        solver.net.copy_from(pretrained_model_path)  # untrained.caffemodel
else:
    solver.restore(snapshot_path)

training_dataset = ImageDataset(image_dir=training_data_dir)

start_time = time.time()

max_iter = 50000000
validation_iter = 1000
plot_iter = 500
epoch = 10
idx_counter = 0

x = []
y = []
z = []  # validation

plt.plot(x, y)
_step = 0
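
The snippet ends just before the loop itself; a sketch of how the lists might be filled with pycaffe's solver.step, assuming the net exposes a scalar blob named 'loss' (a hypothetical name):

# Sketch: step the solver and record the loss for live plotting.
while _step < max_iter:
    solver.step(1)  # one forward/backward pass plus a weight update
    loss = float(solver.net.blobs['loss'].data)  # blob name is an assumption
    if _step % plot_iter == 0:
        x.append(_step)
        y.append(loss)
        plt.plot(x, y)
        plt.pause(0.001)  # refresh the live plot
    _step += 1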