Example 1
    def __init__(self,
                 image_dim=None,
                 tr_ratio=5,
                 gen_noise_dim=100,
                 num_filters=(128, 64),
                 dkernel=3,
                 gkernel=3,
                 nsamples=4,
                 dropout=0.25,
                 resize=2,
                 exp=None):
        """
        Parameter initialization
        :param batch:
        :param tr_ratio:
        :param gr_penalty:
        """
        config = Config()
        self.output_dir = config.output_dir
        self.TRAINING_RATIO = tr_ratio  # Critic updates per generator update
        self.GRADIENT_PENALTY_WEIGHT = 10
        self.generator_noise_dimensions = gen_noise_dim  # Dimension of the noise
        self.num_filters = num_filters  # Number of filters in the kernels

        self.nsamples = nsamples  # Number of samples generated

        self.experiment = f"{strftime('%Y%m%d%H%M%S')}"
        self.exp = exp
        self.image_dim = image_dim
        self.dkernel = dkernel  # Size of the discriminator kernels
        self.gkernel = gkernel  # Size of the generator kernels
        self.dropout = dropout
        self.dense = 512
        self.resize = resize
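
For reference, a hypothetical instantiation of a class with this constructor; the class name WGAN, the image shape and the experiment label are assumptions, not taken from the snippet:

# Hypothetical usage sketch; WGAN and the argument values are assumed
gan = WGAN(image_dim=(64, 64, 3),   # height, width, channels
           tr_ratio=5,              # critic updates per generator update
           gen_noise_dim=100,
           num_filters=(128, 64),
           exp='traffic')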
Example 2
    def __init__(self,
                 image_dim=None,
                 tr_ratio=5,
                 gen_noise_dim=100,
                 num_filters=(128, 64),
                 dkernel=3,
                 gkernel=3,
                 nsamples=4,
                 dropout=0.25,
                 resize=2,
                 exp=None):
        config = Config()
        self.output_dir = config.output_dir

        self.num_filters = num_filters  # Number of filters in the kernels

        self.nsamples = nsamples  # Number of samples generated
        self.experiment = f"{strftime('%Y%m%d%H%M%S')}"
        self.exp = exp

        self.dkernel = dkernel  # Size of the discriminator kernels
        self.gkernel = gkernel  # Size of the generator kernels
        self.image_dim = image_dim
        # xdim, ydim, chann = self.image_dim
        self.dropout = dropout
        self.resize = resize

        self.latent_dim = gen_noise_dim

        # Following parameter and optimizer set as recommended in paper
        self.n_critic = tr_ratio
        self.clip_value = 0.01
        optimizer = RMSprop(learning_rate=0.00005)

        # Build and compile the critic
        self.critic = self.build_critic()
        self.critic.compile(loss=self.wasserstein_loss,
                            optimizer=optimizer,
                            metrics=['accuracy'])

        # Build the generator
        self.generator = self.build_generator()

        # The generator takes noise as input and generates images
        z = Input(shape=(self.latent_dim, ))
        img = self.generator(z)

        # For the combined model we will only train the generator
        self.critic.trainable = False

        # The critic takes generated images as input and determines validity
        valid = self.critic(img)

        # The combined model (stacked generator and critic)
        self.combined = Model(z, valid)
        self.combined.compile(loss=self.wasserstein_loss,
                              optimizer=optimizer,
                              metrics=['accuracy'])
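
The snippet compiles both models with self.wasserstein_loss and sets self.clip_value, but neither the loss nor the clipping step appears here. A minimal sketch of both, following the usual Keras WGAN recipe (these free-function versions are assumptions, not the project's own code):

import numpy as np
from keras import backend as K

def wasserstein_loss(y_true, y_pred):
    # With labels in {-1, +1}, minimizing this approximates the
    # Wasserstein distance between real and generated samples
    return K.mean(y_true * y_pred)

def clip_critic_weights(critic, clip_value):
    # Clipping the weights after every critic update keeps the critic
    # (approximately) Lipschitz, as required by the original WGAN
    for layer in critic.layers:
        weights = [np.clip(w, -clip_value, clip_value)
                   for w in layer.get_weights()]
        layer.set_weights(weights)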
Example 3
def generate_dataset(ldaysTr,
                     z_factor,
                     badpixels=(None, None),
                     lapent=(None, None, None, None)):
    """
    Generates a training and test datasets from the days in the parameters
    z_factor is the zoom factor to rescale the images
    :param cpatt:
    :param ldaysTr:
    :param z_factor:
    :param method:
    :return:

    """
    config = Config()
    cameras_path = config.cameraspath

    ldata = []
    image = TrafficImage()
    for day in ldaysTr:
        camdic = get_day_images_data(cameras_path, day)
        for t in camdic:
            for cam in camdic[t]:
                image.load_image(cameras_path + day + '/' + str(t) + '-' +
                                 cam + '.gif')
                if not image.corrupted and image.is_correct():
                    image.transform_image(z_factor=z_factor, crop=(0, 0, 0, 0))
                    if image.trans:
                        filter_image = False
                        if badpixels[0] is not None:
                            filter_image = filter_image or image.greyscale_histo(
                                badpixels[0], badpixels[1])
                            # filter_image = filter_image or image.truncated_bad_pixels(badpixels[0], badpixels[1])
                        if lapent[0] is not None:
                            laplacian = image.var_laplacian()
                            print(laplacian)
                            # Filter if the laplacian is below the first threshold
                            filter_image = filter_image or laplacian < lapent[0]
                            # Otherwise filter if the laplacian is below the second
                            # threshold and the entropy is below the entropy threshold
                            if not filter_image and laplacian < lapent[1]:
                                entropy = image.entropy(lapent[2])
                                filter_image = entropy < lapent[3]

                        if not filter_image:
                            ldata.append(image.get_data())
                            # print(cameras_path + day + '/' + str(t) + '-' + cam + '.gif')

    X_train = np.array(ldata)

    return X_train
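
image.var_laplacian() and image.entropy() are defined elsewhere; a minimal sketch of what such blur and information measures typically compute on a greyscale image, using OpenCV and NumPy (assumed implementations, not the project's own):

import cv2
import numpy as np

def var_laplacian(grey):
    # Variance of the Laplacian response; low values suggest a blurry image
    return cv2.Laplacian(grey, cv2.CV_64F).var()

def entropy(grey, bins):
    # Shannon entropy of the grey-level histogram; low values suggest
    # an image carrying little information (e.g. fog or a camera fault)
    hist, _ = np.histogram(grey, bins=bins, range=(0, 255))
    p = hist / hist.sum()
    p = p[p > 0]
    return -np.sum(p * np.log2(p))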
Example 4
    def __init__(self,
                 discriminator_extra_steps=3,
                 gp_weight=10.0,
                 image_dim=None,
                 tr_ratio=5,
                 gen_noise_dim=100,
                 num_filters=(128, 64),
                 dkernel=3,
                 gkernel=3,
                 nsamples=4,
                 dropout=0.25,
                 resize=2,
                 exp=None):
        super(WGANGP, self).__init__()
        self.latent_dim = gen_noise_dim
        self.d_steps = discriminator_extra_steps
        self.gp_weight = gp_weight

        config = Config()
        self.output_dir = config.output_dir
        self.TRAINING_RATIO = tr_ratio  # Critic updates per generator update
        self.generator_noise_dimensions = gen_noise_dim  # Dimension of the noise
        self.num_filters = num_filters  # Number of filters in the kernels

        self.nsamples = nsamples  # Number of samples generated

        self.experiment = f"{strftime('%Y%m%d%H%M%S')}"
        self.exp = exp
        self.image_dim = image_dim
        self.dkernel = dkernel  # Size of the discriminator kernels
        self.gkernel = gkernel  # Size of the generator kernels
        self.dropout = dropout
        self.dense = 512
        self.resize = resize

        # Build the networks once the attributes they depend on are set
        self.discriminator = self.make_discriminator()
        self.generator = self.make_generator()

        self.d_optimizer = keras.optimizers.Adam(learning_rate=0.0002,
                                                 beta_1=0.5,
                                                 beta_2=0.9)
        self.g_optimizer = keras.optimizers.Adam(learning_rate=0.0002,
                                                 beta_1=0.5,
                                                 beta_2=0.9)
        self.d_loss_fn = self.discriminator_loss
        self.g_loss_fn = self.generator_loss
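
gp_weight is stored here but the penalty itself is computed in the training step, which the snippet omits. A minimal sketch of the interpolated gradient penalty from the WGAN-GP paper, written as a free function (the signature is an assumption; inside the class it would be weighted by self.gp_weight and added to the critic loss):

import tensorflow as tf

def gradient_penalty(discriminator, real_images, fake_images):
    # Evaluate the critic at points interpolated uniformly between
    # real and generated images and penalize gradient norms far from 1
    batch_size = tf.shape(real_images)[0]
    alpha = tf.random.uniform([batch_size, 1, 1, 1], 0.0, 1.0)
    interpolated = real_images + alpha * (fake_images - real_images)
    with tf.GradientTape() as tape:
        tape.watch(interpolated)
        pred = discriminator(interpolated, training=True)
    grads = tape.gradient(pred, [interpolated])[0]
    norm = tf.sqrt(tf.reduce_sum(tf.square(grads), axis=[1, 2, 3]))
    return tf.reduce_mean((norm - 1.0) ** 2)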
Example 5
    def __init__(self, ldays, zfactor):
        """
        Checks if the file exists

        :param datapath:
        :param days:
        :param zfactor:
        :param nclases:
        :param merge: Merge classes
        """

        self.config = Config()
        self.fname = f'{self.config.datapath}/Data-{name_days_file(ldays)}-Z{zfactor:0.2f}.hdf5'
        self.X_train = None
        self.input_shape = None

        if not os.path.isfile(self.fname):
            raise FileNotFoundError(f'Data file does not exist: {self.fname}')
        self.handle = None
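
The constructor leaves self.handle, self.X_train and self.input_shape unset; a minimal sketch of a companion method that would fill them in from the file written by generate_dataset (the method name load_data is an assumption, and h5py is assumed to be imported):

    def load_data(self):
        # Hypothetical helper: open the HDF5 file and read the 'data'
        # dataset saved by the script in Example 6
        self.handle = h5py.File(self.fname, 'r')
        self.X_train = self.handle['data'][()]
        self.input_shape = self.X_train.shape[1:]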
Example 6
    #     print 'F_HOUR_BAN = ', args.hourban[1]
    if args.greyhisto != [None, None]:
        print('GREY_HISTO = ', args.greyhisto)
    if args.lapent != [None, None, None, None]:
        print('LAPLACIAN_ENTROPY = ', args.lapent)
    # if args.augmentation:
    #     print 'AUGMENTATION = ', args.augmentation
    print('COMPRESS = ', compress)

    print()
    print('Processing images ...')
    print()
    data = generate_dataset(days,
                            z_factor=z_factor,
                            badpixels=(nbin, perc),
                            lapent=(lap1, lap2, bins, ent))

    print(f'{data.shape[0]} Images')

    config = Config()
    nf = name_days_file(days)
    sfile = h5py.File(f'{config.datapath}/Data-{nf}-Z{z_factor:0.2f}.hdf5',
                      'w')

    sfile.require_dataset('data',
                          data.shape,
                          dtype='f',
                          data=data,
                          compression=compress)
    sfile.flush()
    sfile.close()
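
A short round-trip check, reading back the file the script just wrote (it reuses the config, nf and z_factor variables from the snippet above):

with h5py.File(f'{config.datapath}/Data-{nf}-Z{z_factor:0.2f}.hdf5', 'r') as f:
    X = f['data'][()]
    print(X.shape)  # should match data.shape from before saving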