Code example #1
    def __init__(self, nb_classes=2):
        """Initialise an empty dataset holder.

        The image/label arrays stay ``None`` until a later load step fills
        them in.

        Args:
            nb_classes: expected number of target classes (default 2).
        """
        # Train / validation / test splits all start unset.
        self.train_images, self.train_labels = None, None
        self.valid_images, self.valid_labels = None, None
        self.test_images, self.test_labels = None, None
        # Set once the image layout (H, W, C) is known.
        self.input_shape = None
        self.nb_classes = nb_classes
        # Project helper that actually reads the raw data from disk.
        self.datasets = LoadData()
Code example #2
import random
import matplotlib.image as mpimg
import numpy as np
tf.python.control_flow_ops = tf  #Fix error with TF and Keras
from load_dataset import LoadData
from model import modelClass
from preprocess_augmentation import *

if __name__ == '__main__':
    # Parameters
    # left_stcor / right_stcor: steering-angle corrections for the side
    # cameras (behavioural-cloning setup); imgN presumably is the number of
    # image columns per CSV row — TODO confirm against LoadData/split_labels.
    left_stcor = 0.28
    right_stcor = -0.28
    imgN = 5

    # Load the driving-log CSV; 20% of the rows are held out for validation.
    dataO = LoadData(location='../data/driving_log.csv', validationSplit=.2)

    # Sanity prints: show the types of the loaded splits.
    print('data', type(dataO.data))
    print('valid_data', type(dataO.valid_data))
    print('train_data', type(dataO.train_data))

    # Split labels into train/validation steering targets.
    # NOTE(review): the literals 5 / 0.28 / -0.28 duplicate imgN, left_stcor
    # and right_stcor defined above — the variables are never actually used.
    y_train, y_validation = dataO.split_labels(imgN=5,
                                               left_stcor=0.28,
                                               right_stcor=-0.28)

    # Get model (project-defined architecture wrapper).
    modelO = modelClass()

    # Prepare data batch (script continues beyond this excerpt).
    batchSize = 120
Code example #3
class Dataset:
    """Container for the face/gender train and validation splits.

    Data is pulled through the project's ``LoadData`` helper and prepared for
    a Keras-style model: reshaped to (N, H, W, C), one-hot encoded labels,
    and pixel values scaled to [0, 1].
    """

    def __init__(self, nb_classes=2):
        """Create an empty holder; arrays are populated by :meth:`load`.

        Args:
            nb_classes: initial class count; overwritten in :meth:`load`
                from the actual labels.
        """
        self.train_images = None
        self.train_labels = None
        self.valid_images = None
        self.valid_labels = None
        self.test_images = None
        self.test_labels = None
        # Set once the image layout (H, W, C) is known.
        self.input_shape = None
        self.nb_classes = nb_classes
        # Project helper that reads the raw faces/genders from disk.
        self.datasets = LoadData()

    def load(self, grey):
        """Load, split, reshape, one-hot encode and normalize the dataset.

        Args:
            grey: 1 to treat images as single-channel greyscale; any other
                value means 3-channel RGB.
        """
        # load_fbDataset presumably returns parallel lists of images and
        # integer gender labels — TODO confirm against LoadData.
        faces, genders = self.datasets.load_fbDataset(grey=grey)
        faces = np.array(faces)
        genders = np.array(genders)

        train_images, valid_images, train_labels, valid_labels = train_test_split(
            faces, genders, test_size=0.2, random_state=0)

        # The two original branches differed only in the channel count, so
        # compute it once instead of duplicating the reshape logic.
        channels = 1 if grey == 1 else 3
        size = self.datasets.IMAGE_SIZE
        train_images = train_images.reshape(train_images.shape[0],
                                            size, size, channels)
        valid_images = valid_images.reshape(valid_images.shape[0],
                                            size, size, channels)
        self.input_shape = (size, size, channels)

        print(train_images.shape[0], "train samples")
        print(valid_images.shape[0], 'valid samples')

        # Fix: derive the class count from the FULL label set and pass it to
        # to_categorical explicitly. The original let to_categorical infer
        # the width per call, so the train and valid one-hot matrices could
        # disagree whenever one split happened to miss the highest class id.
        self.nb_classes = int(genders.max()) + 1
        train_labels = np_utils.to_categorical(train_labels, self.nb_classes)
        valid_labels = np_utils.to_categorical(valid_labels, self.nb_classes)

        # Scale pixel intensities to [0, 1].
        train_images = train_images.astype('float32') / 255
        valid_images = valid_images.astype('float32') / 255

        self.train_images = train_images
        self.valid_images = valid_images
        self.train_labels = train_labels
        self.valid_labels = valid_labels
Code example #4
File: train_model.py — Project: liyuanhao6/PBL_Project
# Move the GAN components to the GPU when CUDA is available.
if cuda:
    generator.cuda()
    discriminator.cuda()
    adversarial_loss.cuda()

# Initialize both networks' weights with the project's normal-init scheme.
generator.apply(weights_init_normal)
discriminator.apply(weights_init_normal)

# Configure data loader: cap workers at the CPU count, the batch size
# (0 workers for batch sizes <= 1), and a hard ceiling of 8.
nw = min(os.cpu_count(), batch_size if batch_size > 1 else 0, 8)
print('Using {} dataloader workers every process'.format(nw))
train_dataset = LoadData(
    dped_dir=dped_dir,
    dataset_size=train_size,
    image_size=PATCH_SIZE,
    test=False,
)
dataloader = torch.utils.data.DataLoader(
    train_dataset,
    batch_size=batch_size,
    shuffle=True,
    num_workers=nw,
    pin_memory=True,
)

# Adam optimizers for generator and discriminator share hyper-parameters.
adam_kwargs = dict(lr=learning_rate, betas=(0.5, 0.99))
optimizer_G = torch.optim.Adam(generator.parameters(), **adam_kwargs)
optimizer_D = torch.optim.Adam(discriminator.parameters(), **adam_kwargs)