Example #1
    def load_data(self, verbose=False, image_only=False, train_data_rate=0.7):

        print('Start loading image files...')
        if image_only:
            paths = glob(os.path.join(args.image_root, '*'))
            print('loaded {} images'.format(len(paths)))

            pivot = int(len(paths) * train_data_rate)
            paths_sep = {'train': paths[:pivot], 'test': paths[pivot:]}
            loader = lambda s: ImageLoader(paths_sep[s],
                                           transform=self.transform[s])

        elif args.one_hot:
            sep_data = pd.read_pickle(args.pkl_path)
            loader = lambda s: ClassImageLoader(paths=sep_data[s],
                                                transform=self.transform[s])

        else:
            df = pd.read_pickle(args.pkl_path)
            print('loaded {} rows'.format(len(df)))
            pivot = int(len(df) * train_data_rate)
            df_shuffle = df.sample(frac=1)
            df_sep = {'train': df_shuffle[:pivot], 'test': df_shuffle[pivot:]}
            del df, df_shuffle
            loader = lambda s: FlickrDataLoader(args.image_root,
                                                df_sep[s],
                                                self.cols,
                                                transform=self.transform[s])

        train_set = loader('train')
        test_set = loader('test')
        print('train: {} test: {} sets loaded.'.format(
            len(train_set), len(test_set)))
        return train_set, test_set
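A minimal usage sketch of the method above, assuming the enclosing instance is called `data_module` and that the returned sets feed PyTorch's standard DataLoader (neither appears in the snippet):

from torch.utils.data import DataLoader

# hypothetical call site for load_data
train_set, test_set = data_module.load_data(image_only=True)
train_loader = DataLoader(train_set, batch_size=32, shuffle=True)
test_loader = DataLoader(test_set, batch_size=32, shuffle=False)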
Example #2
    def load_data(self, verbose=False, image_only=False, train_data_rate=0.7):

        print('Start loading image files...')
        if image_only:
            paths = glob(os.path.join(args.image_root, '*'))
            print('loaded {} images'.format(len(paths)))
            pivot = int(len(paths) * train_data_rate)
            paths_sep = {'train': paths[:pivot], 'test': paths[pivot:]}
            loader = lambda s: ImageLoader(paths_sep[s],
                                           transform=self.transform[s])

        else:
            df = pd.read_pickle(args.pkl_path)

            # reference dataset used only to compute normalization statistics
            temp = pd.read_pickle(
                '/mnt/fs2/2019/okada/from_nitta/parm_0.3/sepalated_data_wo-outlier.pkl'
            )
            df_ = temp.loc[:, self.cols].fillna(0)
            df_mean = df_.mean()
            df_std = df_.std()

            # z-score the signal columns with the reference statistics
            df.loc[:, self.cols] = (df.loc[:, self.cols].fillna(0) -
                                    df_mean) / df_std

            print('loaded {} rows of signal data'.format(len(df)))
            df_shuffle = df.sample(frac=1)
            df_sep = {
                'train': df_shuffle[df_shuffle['mode'] == 'train'],
                'test': df_shuffle[df_shuffle['mode'] == 'test']
            }
            del df, df_shuffle, temp
            loader = lambda s: FlickrDataLoader(args.image_root,
                                                df_sep[s],
                                                self.cols,
                                                transform=self.transform[s])

        train_set = loader('train')
        test_set = loader('test')
        print('train: {} test: {} sets loaded.'.format(
            len(train_set), len(test_set)))
        return train_set, test_set
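Example #2 differs from Example #1 mainly in standardizing the signal columns with statistics computed from a separate reference pickle. A self-contained sketch of that z-score step on toy data (the column name is made up):

import pandas as pd

ref = pd.DataFrame({'tempC': [10.0, 20.0, 30.0]})    # reference set
df = pd.DataFrame({'tempC': [15.0, None, 25.0]})     # data to normalize

mean, std = ref['tempC'].mean(), ref['tempC'].std()  # mean=20, std=10
df['tempC'] = (df['tempC'].fillna(0) - mean) / std   # -> -0.5, -2.0, 0.5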
Example #3
    # score each split separately, then report mean and std across splits
    for k in range(splits):
        part = preds[k * (N // splits):(k + 1) * (N // splits), :]
        py = np.mean(part, axis=0)  # marginal p(y) over the split
        scores = []
        for i in range(part.shape[0]):
            pyx = part[i, :]
            # entropy(pyx, py) computes KL(p(y|x) || p(y))
            scores.append(entropy(pyx, py))
        split_scores.append(np.exp(np.mean(scores)))

    return np.mean(split_scores), np.std(split_scores)


if __name__ == '__main__':
    paths = glob(os.path.join(args.input_path, '*.jpg'))

    transform = transforms.Compose([
        transforms.Resize((args.input_size,) * 2),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
    ])

    dataset = ImageLoader(paths, transform=transform)

    print("Calculating Inception Score...")
    print(
        inception_score(dataset,
                        cuda=args.gpu,
                        batch_size=args.batch_size,
                        resize=True,
                        splits=10))
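The value printed is the standard Inception Score: the exponential of the average KL divergence between each conditional p(y|x) and the marginal p(y), computed per split. With two arguments, scipy.stats.entropy returns exactly that KL term. A tiny numeric check:

import numpy as np
from scipy.stats import entropy

pyx = np.array([0.8, 0.2])  # conditional p(y|x) for one image
py = np.array([0.5, 0.5])   # marginal p(y) over a split
kl = entropy(pyx, py)       # KL(p(y|x) || p(y))
assert np.isclose(kl, np.sum(pyx * np.log(pyx / py)))
print(np.exp(kl))           # one sample's contribution to the score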
Example #4

if __name__ == '__main__':
    transform = nn.Sequential(
        # transforms.Resize((args.input_size,) * 2),
        transforms.ConvertImageDtype(torch.float32),
        transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
    )
    s_bs = args.batch_size
    r_bs = args.batch_size

    if args.image_only:
        sep_data = glob(os.path.join(args.image_root, '*.png'))
        print('loaded {} images'.format(len(sep_data)))

        dataset = ImageLoader(paths=sep_data, transform=transform, inf=True)
    else:
        cols = ['tempC', 'uvIndex', 'visibility', 'windspeedKmph',
                'cloudcover', 'humidity', 'pressure', 'DewPointC']

        # get normalization parameters from a reference dataset
        temp = pd.read_pickle('/mnt/fs2/2019/Takamuro/m2_research/flicker_data/wwo/2016_17/lambda_0/outdoor_all_dbdate_wwo_weather_2016_17_delnoise_WoPerson_sky-10_L-05.pkl')
        df_ = temp.loc[:, cols].fillna(0)
        df_mean = df_.mean()
        df_std = df_.std()

        df = pd.read_pickle(args.s_pkl_path)
        # z-score the signal columns with the reference statistics
        df.loc[:, cols] = (df.loc[:, cols].fillna(0) - df_mean) / df_std
        df_sep = df[df['mode'] == 'test']
        print('loaded {} rows of signal data'.format(len(df_sep)))
        del df, df_, temp
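Example #4 is cut off after df_sep is built. Judging from Examples #1 and #2, the natural continuation would wrap the test rows in a FlickrDataLoader, roughly as below (this is an assumption, not the author's code; args.num_workers is also assumed):

        # hypothetical continuation of the else branch
        dataset = FlickrDataLoader(args.image_root, df_sep, cols,
                                   transform=transform)
        loader = torch.utils.data.DataLoader(dataset,
                                             batch_size=r_bs,
                                             num_workers=args.num_workers)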
Example #5
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import matplotlib.pyplot as plt
import numpy as np

from dataset import ImageLoader, Dataset, RandomPipeline, Config

dirname = 'data'
image_ind = 5

loader = ImageLoader(dirname, id='tmc')
loader.load('image')
loader.load('label')
loader.load('mask')
dataset = Dataset(images=loader.images)

pipeline = RandomPipeline()
pipeline.register('sigmoid_intensity', 'cropping')
dataset.add_pipeline(pipeline)

image, label = dataset[image_ind]
print(np.unique(label))
shape = image.shape[1:]

dataset = Dataset(images=loader.images)
pipeline = RandomPipeline()
pipeline.register('cropping')
dataset.add_pipeline(pipeline)
orig_image, label = dataset[image_ind]
print(np.unique(label))
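The snippet imports matplotlib.pyplot but stops before plotting. A plausible ending that compares the two pipeline outputs side by side (the channel-first, volumetric indexing is an assumption about the loader's output layout):

# hypothetical visualization of augmented vs. original crops
fig, axes = plt.subplots(1, 2)
axes[0].imshow(image[0, shape[0] // 2], cmap='gray')
axes[0].set_title('sigmoid_intensity + cropping')
axes[1].imshow(orig_image[0, shape[0] // 2], cmap='gray')
axes[1].set_title('cropping only')
plt.show()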
Example #6
    photo_list = sorted(glob(os.path.join(args.image_root, '*', '*.jpg'),
                             recursive=True))
    print('{} loaded'.format(len(photo_list)))
    # for FlickrDataLoader
    # transform = nn.Sequential(
    #     transforms.ConvertImageDtype(torch.float32),
    #     transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
    #     )

    # for ImageLoader
    transform = transforms.Compose([
        transforms.Resize((args.input_size,)*2),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
    ])

    dataset = ImageLoader(photo_list, transform=transform)

    loader = torch.utils.data.DataLoader(dataset,
                                         batch_size=args.batch_size,
                                         num_workers=args.num_workers,
                                         drop_last=True)

    # load model
    estimator = torch.load(args.estimator_path)
    estimator.eval()
    estimator.to('cuda')

    bs = args.batch_size
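The example ends right after the estimator is prepared; a minimal sketch of the inference loop that would typically follow (how the predictions are consumed is an assumption):

    # hypothetical inference loop over the photo dataset
    results = []
    with torch.no_grad():
        for images in loader:
            preds = estimator(images.to('cuda'))
            results.append(preds.cpu())
    results = torch.cat(results)
    print(results.shape)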
Example #7
import os

import matplotlib.pyplot as plt
import torch
import torch.nn as nn
from torch.utils.data import DataLoader

from dataset import ImageLoader
from discriminator import Discriminator
from generator import Generator


# IMG_ROOT = '../Scripts/dataset/train_image/'
# LABEL_ROOT = '../Scripts/dataset/train_label/'

BASE_PATH = '/gpfs/workdir/houdberta/gan-getting-started/'
MONET_PATH = os.path.join(BASE_PATH, 'monet_jpg')
PHOTO_PATH = os.path.join(BASE_PATH, 'photo_jpg')

BATCH_SIZE = 4
trainset = ImageLoader(MONET_PATH, PHOTO_PATH)
loader = DataLoader(trainset, BATCH_SIZE, shuffle=True)

device = 'cuda:0'

def discriminator_real_loss(real):
    # real images should be scored as real (target = 1)
    criterion = nn.BCEWithLogitsLoss()
    real_loss = criterion(real, torch.ones_like(real))
    return real_loss


def discriminator_fake_loss(generated):
    # generated images should be scored as fake (target = 0)
    criterion = nn.BCEWithLogitsLoss()
    generated_loss = criterion(generated, torch.zeros_like(generated))
    return generated_loss

def id_loss(real, generated, Lambda=2e-4):
    # body truncated in the source; a standard L1 identity loss is assumed
    return Lambda * torch.mean(torch.abs(real - generated))
Example #8
    os.makedirs(opt.checkpoints_folder, exist_ok=True)

print('Model checkpoints directory created')

# enable the cuDNN auto-tuner for fixed-size inputs
cudnn.benchmark = True

batch_size = opt.batchSize

if torch.cuda.is_available() and not opt.cuda:
    print(
        "WARNING: You have a CUDA device, so you should probably run with --cuda"
    )

# Load training and test data
traindataset = ImageLoader(
    image_folder='samples_generated', label_csv='sample_text.csv',
    transform=transforms.Compose([transforms.Resize((32, 100)),
                                  transforms.ToTensor()]))

assert traindataset

train_dataloader = DataLoader(traindataset,
                              batch_size=batch_size,
                              shuffle=True,
                              num_workers=0,
                              drop_last=True)

# note: this test set reuses the same folder and csv as the training set
testdataset = ImageLoader(
    image_folder='samples_generated', label_csv='sample_text.csv',
    transform=transforms.Compose([transforms.Resize((32, 100)),
                                  transforms.ToTensor()]))

assert testdataset

test_dataloader = DataLoader(testdataset,
                             batch_size=batch_size,
                             num_workers=0,
                             drop_last=True)
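With both loaders built, a quick smoke test of the batch shapes (assuming the dataset yields (image, text_label) pairs, which the snippet does not show):

# hypothetical sanity check of one batch
images, labels = next(iter(train_dataloader))
print(images.shape)  # expected: [batch_size, C, 32, 100]
print(labels[:3])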