Example #1
File: main.py Project: ACov96/upscaler
def main(args):
    files = glob(args.data_path + '*.jpg')
    training_data = ImageDataset(files[:int(len(files) * .9)])
    dev_data = ImageDataset(files[int(len(files) * .9):])
    model = train(training_data, dev_data, args)
    print('Saving model to %s' % args.o)
    save_model(model, args.o)
Example #2
def get_dataloader(image_paths: List[str],
                   transform: Optional[Callable] = None,
                   size: Optional[Union[int, Tuple[int, int]]] = None,
                   shuffle: bool = True,
                   num_workers: int = NUM_WORKERS,
                   batch_size: int = BATCH_SIZE) -> DataLoader:
    """
    To get the dataloader for ImageDataset using the given image paths

    params : 
        image_paths     :   List of strings - contains the path of Images
        transforms      :   torchvision.transforms.transform - contains one 
                            or more transformations to apply on the images
        size            :   Tuple - contains the height and width or integer 
                            indicating same height and width
        shuffle         :   Boolean - To shuffle the data or not
        num_workers     :   Int - The number of workers to use 
        batch_size      :   Int - Batch Size for the dataloader(number of images to load as a batch)
    """
    dataset = ImageDataset(image_paths, transform, size)
    dataloader = DataLoader(dataset,
                            batch_size=batch_size,
                            shuffle=shuffle,
                            num_workers=num_workers)
    return dataloader
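
A minimal call sketch (hypothetical paths; assumes the module-level NUM_WORKERS and BATCH_SIZE defaults and that ImageDataset applies the given transform):

from glob import glob
from torchvision import transforms

paths = glob('./data/images/*.jpg')  # hypothetical image folder
loader = get_dataloader(paths,
                        transform=transforms.Compose([transforms.ToTensor()]),
                        size=(224, 224),
                        shuffle=True)
batch = next(iter(loader))  # one batch of image tensors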
Example #3
File: eval.py Project: ande765a/ChromaAI
def eval(images_path, model, no_l, load, output, device, batch_size=8):
    colorizer = models[model]().to(device)
    colorizer.eval()
    colorizer.load_state_dict(torch.load(load, map_location=device))

    image_dataset = ImageDataset(images_path,
                                 transform=transforms.Compose([
                                     ToLAB(),
                                     ReshapeChannelFirst(),
                                     ToTensor()
                                 ]))

    images = DataLoader(image_dataset, batch_size=batch_size)

    with torch.no_grad():
        L, _ = next(iter(images))
        L = L.to(device)
        AB_pred = colorizer(L).cpu()
        L = L.cpu()
        output_images = torch.cat(
            ((torch.ones_like(L) * 0.5 if no_l else L) * 100, AB_pred * 128),
            dim=1).double()
        output_images = output_images.numpy().transpose((0, 2, 3, 1))

        #mean_error, std_dev, lower_bound, upper_bound = calculateStats(images, output_images, batch_size, 1.96)

        for i, array_image in enumerate(output_images):
            output_image = lab2rgb(array_image)
            imsave(os.path.join(output, "{}.png".format(i)), output_image)
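
The `* 100` and `* 128` factors undo the scaling applied during training: skimage's lab2rgb expects L in [0, 100] and the a/b channels roughly in [-128, 127]. A quick round-trip check of that convention (assumes scikit-image is installed):

import numpy as np
from skimage.color import rgb2lab, lab2rgb

rgb = np.random.rand(8, 8, 3)   # float RGB in [0, 1]
lab = rgb2lab(rgb)              # L in [0, 100], a/b roughly [-128, 127]
assert np.allclose(lab2rgb(lab), rgb, atol=1e-4)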
Example #4
def load_data(data_dir, classes):
    #  data_dir='../data'
    #  data_split='val'
    #  classes = ('person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush')
    # Parameters (an earlier notebook cell's values were overridden; only the final ones are kept)
    n_epochs = 100
    batch_size = 8
    emsize = 128
    margin = 3.

    dataset = ImageDataset(data_dir, classes, tfms=tfms)  # `tfms` is assumed to be defined at module level
    n_classes = len(classes)

    imgset = [(dataset._data[i][0], img)
              for i, (img, label) in enumerate(dataset)]
    print(len(dataset))
    return imgset
Example #5
    def __dataloader(self, train_mode):
        self.prepare_data()
        batch_size = self.hparams.batch_size
        num_workers = 4
        if train_mode == 'train':
            transform = self.train_transform
            dataset = ImageDataset(root='./data/train/',
                                   path_list=self.train_x,
                                   targets=self.train_y,
                                   transform=transform)

            loader = DataLoader(dataset,
                                batch_size=batch_size,
                                num_workers=num_workers,
                                shuffle=True,
                                drop_last=True,
                                pin_memory=True)

        elif train_mode == 'valid':
            dataset = ImageDataset(root='./data/train/',
                                   path_list=self.valid_x,
                                   targets=self.valid_y,
                                   transform=self.test_transform)

            loader = DataLoader(dataset,
                                batch_size=batch_size,
                                num_workers=num_workers,
                                shuffle=False,
                                drop_last=False,
                                pin_memory=True)
        else:
            dataset = ImageDataset(root='./data/test/',
                                   path_list=self.test_x,
                                   transform=self.test_transform)

            loader = DataLoader(dataset,
                                batch_size=batch_size,
                                num_workers=num_workers,
                                shuffle=False,
                                drop_last=False,
                                pin_memory=True)

        return loader
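
A sketch of how such a private helper is usually surfaced through PyTorch Lightning's dataloader hooks (hypothetical; the enclosing class is not shown in the snippet):

    def train_dataloader(self):
        return self.__dataloader('train')

    def val_dataloader(self):
        return self.__dataloader('valid')

    def test_dataloader(self):
        return self.__dataloader('test')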
Example #6
File: stats.py Project: ande765a/ChromaAI
def stats(images_path, model, device, load, log_output):
    colorizer = models[model]().to(device)
    colorizer.eval()
    colorizer.load_state_dict(torch.load(load, map_location=device))

    image_dataset = ImageDataset(images_path,
                                 transform=transforms.Compose([
                                     ToLAB(),
                                     ReshapeChannelFirst(),
                                     ToTensor()
                                 ]))

    criterion = nn.MSELoss(reduction='sum')  # size_average=False is deprecated

    images = DataLoader(image_dataset, batch_size=1)

    if log_output is not None:
        with open(log_output, "w+") as log_file:
            log_file.write("i,loss\n")

    losses = []
    for i, (L, AB) in enumerate(images):
        L = L.to(device)
        AB_pred = colorizer(L).cpu()
        AB = AB.cpu()
        loss = criterion(AB_pred, AB)
        losses.append(loss.item())
        if log_output is not None:
            print("{},{}".format(i, loss.item()))
            with open(log_output, "a+") as log_file:
                log_file.write("{},{}\n".format(i, loss.item()))

    return np.array(losses)
    #losses = np.array(losses)
    #mean = losses.mean()
    #var = losses.var()
    #z = 1.96
    #lower = mean - z * (var / np.sqrt(len(losses)))
    #upper = mean + z * (var / np.sqrt(len(losses)))
    #print("mean: {}\nvar: {}\nlower: {}\nupper: {}".format(mean, var, lower, upper))
Example #7
def main(args):
    img_paths = get_file_names(args.images_path)
    tfs = transforms.Compose([
        transforms.ToTensor(),
    ])
    dataset = ImageDataset(img_paths, tfs, size=SIZE)
    dataloader = DataLoader(dataset,
                            shuffle=True,
                            num_workers=NUM_WORKERS,
                            batch_size=BATCH_SIZE)
    model = get_model(args.model_name)

    logging.info(summary(model, (3, 224, 224)))

    sf = SaveFeatures(list(model.children())[-1][4])

    paths, features = get_features(dataloader, sf, model)

    features_df = pd.DataFrame([paths, features]).T
    features_df['img_path'] = features_df[0].apply(lambda x: x.split('/')[-1])
    features_df.columns = ['FullPath', 'Features', 'ImageName']

    save_pickle_object(features_df, args.save_feature_path)
Example #8
def main(args):
    image_ids, image_paths = read_split_image_ids_and_paths(args.split)
    image_paths = [
        os.path.join(args.ms_coco_dir, 'images', image_path)
        for image_path in image_paths
    ]
    features_dir = os.path.join(args.output_dir, f'{args.split}-features-grid')

    os.makedirs(features_dir, exist_ok=True)

    inception = inception_v3_base(pretrained=True)
    inception.eval()
    inception.to(args.device)

    transform = transforms.Compose([
        transforms.Resize((299, 299)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])

    dataset = ImageDataset(image_ids, image_paths, transform=transform)
    loader = torch.utils.data.DataLoader(dataset,
                                         batch_size=args.batch_size,
                                         num_workers=args.num_workers,
                                         pin_memory=args.device.type == 'cuda',
                                         shuffle=False)

    with torch.no_grad():
        for imgs, ids in tqdm.tqdm(loader):
            outs = inception(imgs.to(args.device)).permute(0, 2, 3, 1).view(
                -1, 64, 2048)
            for out, id in zip(outs, ids):
                out = out.cpu().numpy()
                id = str(id.item())
                np.save(os.path.join(features_dir, id), out)
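
Each image's grid features land in a `<image-id>.npy` file of shape (64, 2048), since np.save appends the .npy suffix. Reading one back, reusing features_dir from above with a hypothetical id:

import os
import numpy as np

feats = np.load(os.path.join(features_dir, '42.npy'))  # '42' is a hypothetical image id
assert feats.shape == (64, 2048)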
Example #9
            all_class.append(class_pred.cpu().numpy())

    return all_class




if __name__ == '__main__':


    data_dir = sys.argv[1] + "/"
    out_fn = sys.argv[2]

    transform = trns.Compose([])

    svhn_testdata = ImageDataset(data_dir, "", transform )
    svhn_test_dataloader = DataLoader(svhn_testdata, batch_size=128, shuffle=False, num_workers=1)

    model = GTA()
    model.load_state_dict(torch.load("./model/GTA_M2S"))

    model = model.cuda()
    svhn_class = model_evaluate(model.feat_extracter, model.source_classifier, svhn_test_dataloader)
    model = model.cpu()

    svhn_class = np.concatenate(svhn_class)

    with open(out_fn, 'w') as fd:
        for i in range(svhn_class.shape[0]):
            if i != 0:
                fd.write('\n')
            fd.write(str(svhn_class[i]))  # assumption: the truncated source writes the predicted class here
Example #10
File: main.py Project: ck20jimmy/DLCV2019
from tensorboardX import SummaryWriter

from data import ImageDataset
from model import *
from train import *




# https://arxiv.org/pdf/1704.00028.pdf


if __name__ == '__main__':

	train_data = ImageDataset("../../hw3_data/face/clean_data_dlib_cv2/", "../../hw3_data/face/train.csv")
	train_dataloader = DataLoader(train_data, batch_size=32, shuffle=True, num_workers=2)

	model = ACGAN()

	beta1 = 0.5
	beta2 = 0.9
	gen_optim = torch.optim.Adam(model.Generator.parameters(), lr=2e-4, betas=(beta1, beta2))
	dis_optim = torch.optim.Adam(model.Discriminator.parameters(), lr=2e-4, betas=(beta1, beta2))

	epochs = 2000

	save_dir = "../../model/ACGAN_DC/"

	fixed_inp = torch.zeros(64, 127)
	fixed_noise = torch.randn(32, 127)
Example #11
        # x.size() = (batch_size, 2048)

        feat = self.bottleneck(x)

        if self.is_training:
            score = self.classifier(feat)
            return score, x
        else:
            return feat


if __name__ == "__main__":
    from data import Market1501, ImageDataset

    datasource = Market1501("/home/hien/Documents/datasets")
    datasets = ImageDataset(data=datasource.get_data("train"))
    transform_val = torchvision.transforms.Compose(
        [
            torchvision.transforms.Resize(size=(256, 128)),
            torchvision.transforms.ToTensor(),
            torchvision.transforms.Normalize(
                [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
            ),
        ]
    )
    images = []
    labels = []
    for i in range(4):
        image, label, _ = datasets[i]
        image = transform_val(image)
        images.append(image)
Example #12
print('>>> Parameters Defined ')

# Image Transformations [ Resize, Convert2Tensor, and Normalize ]
#-------------------------------------------------------------------
transform = transforms.Compose([
    transforms.Resize(143, Image.BICUBIC),
    transforms.RandomCrop(128),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
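
With per-channel mean and std both 0.5, Normalize maps ToTensor's [0, 1] output into [-1, 1], the range a tanh-output generator produces. A quick check:

import torch
from torchvision import transforms

norm = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
x = torch.rand(3, 4, 4)   # ToTensor-style range [0, 1]
y = norm(x)               # (x - 0.5) / 0.5
assert -1.0 <= y.min() and y.max() <= 1.0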

# ---------------------------------------------------------------------------------
# Dataloader
dataloader = DataLoader(ImageDataset(DATASET_PATH, transform_=transform),
                        batch_size=1,
                        shuffle=True,
                        num_workers=1)

batch = next(iter(dataloader))
print('\n>>> Data Loader with Transformation is Ready')

# Batch Information
batch_info = f""" 

\n--------------------------------------------
Batch Info : ( Each item from data loader )
______________________________________________

Dictionary : \n{batch.keys()}\n
Example #13
n_channels = 3
dataset = "data/shapes"

image_transforms = transforms.Compose([
    PIL.Image.fromarray,
    transforms.Resize(img_size),  # transforms.Scale is deprecated in modern torchvision
    transforms.ToTensor(),
    lambda x: x[:n_channels, ::],
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])

video_transforms = functools.partial(video_transform,
                                     image_transform=image_transforms)

dataset = VideoFolderDataset(dataset, cache=None)
image_dataset = ImageDataset(dataset, image_transforms)
image_loader = DataLoader(image_dataset,
                          batch_size=image_batch,
                          drop_last=True,
                          num_workers=2,
                          shuffle=True)

video_dataset = VideoDataset(dataset, 16, 2, video_transforms)
video_loader = DataLoader(video_dataset,
                          batch_size=video_batch,
                          drop_last=True,
                          num_workers=2,
                          shuffle=True)

generator = VideoGenerator(n_channels, dim_z_content, dim_z_category,
                           dim_z_motion, video_length)
Example #14
    print(' ')

else:
    device = torch.device('cpu')
    print(' ')
    print('Running on the CPU')
    print(' ')

##############################

if MODE == 'TRAINING':
    # Init dataset
    print('Initializing dataset...')
    print(' ')
    train_dataset = ImageDataset(
        root_dir, alpha=200, sigma=10, crop=crop, ISBI2012=ISBI2012
    )  # For training + validation (in case of FOLDS = None, only for training)

    # Determine number of samples in training and validation
    samp_tr = int(np.round(tr_per * len(train_dataset)))
    samp_val = int(np.round(val_per * len(train_dataset)))

    # Round numbers so that we do not exceed total number of samples
    while samp_tr + samp_val > len(train_dataset):
        samp_val -= 1

    # Generate an order vector to shuffle the samples before each fold for the cross validation
    np.random.seed(SEED)
    order = np.arange(len(train_dataset))
    np.random.shuffle(order)
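
The shuffled `order` vector is presumably sliced into per-fold index ranges; a hedged sketch of building one train/validation split from it with torch's Subset:

from torch.utils.data import Subset

val_idx = order[:samp_val]                   # first chunk for validation
tr_idx = order[samp_val:samp_val + samp_tr]  # next chunk for training
train_subset = Subset(train_dataset, tr_idx.tolist())
val_subset = Subset(train_dataset, val_idx.tolist())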
Example #15
def train(
        images_path,
        device,
        model,
        load,
        batch_size,
        shuffle,
        log_output,
        loss,
        num_workers,
        num_epochs,
        learning_rate,
        save_best,
        save,
        save_frequency  # save a checkpoint every save_frequency epochs
):

    phases = ["validation", "training"]

    images_dataset = {
        x: ImageDataset(os.path.join(images_path, x),
                        transform=transforms.Compose(
                            [ToLAB(),
                             ReshapeChannelFirst(),
                             ToTensor()]))
        for x in phases
    }

    images = {
        x: DataLoader(images_dataset[x],
                      batch_size=batch_size,
                      shuffle=shuffle,
                      num_workers=num_workers)
        for x in phases
    }

    # Make instance of model
    colorizer = models[model]()

    if load is not None:
        print("Loading model from: {}".format(load))
        try:
            colorizer.load_state_dict(torch.load(load))
        except Exception:
            print("Could not load model.")

    num_cuda_devices = torch.cuda.device_count()
    print("Training on {} cuda devices.".format(num_cuda_devices))
    if num_cuda_devices > 1:
        colorizer = nn.DataParallel(colorizer)

    colorizer = colorizer.to(device)

    discriminator = None
    criterion = None
    optimizer = None
    d_optimizer = None

    if loss == "gan":
        discriminator = Discriminator()
        if load is not None:
            try:
                discriminator.load_state_dict(
                    torch.load("{}.discriminator".format(load)))
            except Exception:
                print("Could not load discriminator model.")
        discriminator = discriminator.to(device)
        criterion = nn.BCELoss()
        optimizer = optim.SGD(colorizer.parameters(), lr=learning_rate)
        d_optimizer = optim.SGD(discriminator.parameters(), lr=learning_rate)
    else:
        criterion = nn.MSELoss()
        optimizer = optim.SGD(colorizer.parameters(), lr=learning_rate)

    best_validation_loss = math.inf

    if log_output is not None:
        with open(log_output, "w+") as log_file:
            log_file.write("epoch,training_loss,validation_loss\n")

    try:
        # Train our model
        for epoch in range(num_epochs):
            print("Epoch {}/{}".format(epoch + 1, num_epochs))

            running_train_loss = 0.0
            running_validation_loss = 0.0

            for phase in phases:
                if phase == "training":
                    colorizer.train()
                elif phase == "validation":
                    colorizer.eval()

                for L, AB in images[phase]:
                    L = L.to(device)
                    AB = AB.to(device)
                    AB_pred = colorizer(L)

                    if loss == "gan":
                        LAB = torch.cat((L, AB), dim=1)
                        LAB_gen = torch.cat((L, AB_pred), dim=1)
                        valid = torch.ones((L.shape[0], 1)).to(device)
                        invalid = torch.zeros((L.shape[0], 1)).to(device)

                        if phase == "training":

                            optimizer.zero_grad()
                            g_loss = criterion(discriminator(LAB_gen), valid)
                            g_loss.backward()
                            optimizer.step()

                            d_optimizer.zero_grad()
                            real_loss = criterion(discriminator(LAB), valid)
                            fake_loss = criterion(
                                discriminator(LAB_gen.detach()), invalid)
                            d_loss = (real_loss + fake_loss) / 2
                            d_loss.backward()
                            d_optimizer.step()

                            running_train_loss += g_loss.item() * L.size(0)

                        elif phase == "validation":
                            g_loss = criterion(discriminator(LAB_gen), valid)
                            running_validation_loss += g_loss.item() * L.size(
                                0)

                    else:
                        _loss = criterion(AB_pred, AB)
                        if phase == "training":
                            optimizer.zero_grad()
                            _loss.backward()
                            optimizer.step()
                            running_train_loss += _loss.item() * L.size(0)
                        elif phase == "validation":
                            running_validation_loss += _loss.item() * L.size(0)

            epoch_train_loss = running_train_loss / len(
                images_dataset["training"])
            epoch_validation_loss = running_validation_loss / len(
                images_dataset["validation"])
            print("Training loss: {}".format(epoch_train_loss))
            print("Validation loss: {}".format(epoch_validation_loss))

            if log_output is not None:
                with open(log_output, "a+") as log_file:
                    log_file.write("{},{},{}\n".format(epoch + 1,
                                                       epoch_train_loss,
                                                       epoch_validation_loss))

            if save_best is not None and epoch_validation_loss < best_validation_loss:
                print("Saving best model.")
                best_validation_loss = epoch_validation_loss
                torch.save(colorizer.state_dict(), save_best)

                if loss == "gan":
                    torch.save(discriminator.state_dict(),
                               "{}.discriminator".format(save_best))

            if save is not None and epoch % save_frequency == 0:
                print("Saving model.")
                torch.save(colorizer.state_dict(), save)

                if loss == "gan":
                    torch.save(discriminator.state_dict(),
                               "{}.discriminator".format(save))

            print("-" * 30)
    except KeyboardInterrupt:
        print("Stopping training.")

    if save is not None:
        print("Saving final model.")
        torch.save(colorizer.state_dict(), save)

        if loss == "gan":
            torch.save(discriminator.state_dict(),
                       "{}.discriminator".format(save))
Example #16
File: main.py Project: weizequan/examples
def main():
    args = parser.parse_args()

    cuda = torch.cuda.is_available()
    torch.manual_seed(args.seed)
    if cuda:
        torch.cuda.manual_seed(args.seed)

    # 1. dataset
    root = args.root
    kwargs = {'num_workers': 4, 'pin_memory': True} if cuda else {}
    train_transforms = transforms.Compose([transforms.Resize((96, 128)),  # Scale is deprecated
                                           transforms.RandomHorizontalFlip(),
                                           transforms.ToTensor(),
                                           transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])
    test_transforms = transforms.Compose([transforms.Resize((96, 128)),
                                          transforms.ToTensor(),
                                          transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])
    train_dataset = ImageDataset(root, transform=train_transforms)
    val_dataset = copy.deepcopy(train_dataset)
    val_dataset.train = False

    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, **kwargs)
    val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=args.test_batch_size, shuffle=False, **kwargs)

    # 2. model
    model = FaceModel(len(train_dataset.classes))
    if cuda:
        model = model.cuda()

    # 3. optimizer
    optim = torch.optim.Adam(model.parameters(), lr=args.lr, betas=(args.beta1, 0.999))
    trainer = tn.Trainer(
        cuda=cuda,
        model=model,
        optimizer=optim,
        train_loader=train_loader,
        val_loader=val_loader,
        max_iter=args.max_iter,
        center_loss_weight=args.center_loss_weight
    )

    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            trainer.epoch = checkpoint['epoch']
            trainer.iteration =  trainer.epoch * len(train_loader)
            trainer.best_prec1 = checkpoint['best_prec1']
            trainer.model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    else:
        model.apply(weights_init)
        trainer.epoch = 0
        trainer.iteration = 0
        trainer.best_prec1 = 0
    trainer.train()
Example #17
from tensorboardX import SummaryWriter

from data import ImageDataset
from model import *
from train import *

# https://arxiv.org/pdf/1704.00028.pdf
# https://arxiv.org/pdf/1409.1556v6.pdf

if __name__ == '__main__':

    # transform = trns.Compose([ trns.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)) ])
    transform = trns.Compose([])

    mnist_data = ImageDataset("../../hw3_data/digits/mnistm/train/", \
     "../../hw3_data/digits/mnistm/train.csv", \
     transform )
    mnist_dataloader = DataLoader(mnist_data,
                                  batch_size=128,
                                  shuffle=True,
                                  num_workers=1)

    svhn_data = ImageDataset("../../hw3_data/digits/svhn/train/", \
     "../../hw3_data/digits/svhn/train.csv", \
     transform )
    svhn_dataloader = DataLoader(svhn_data,
                                 batch_size=128,
                                 shuffle=True,
                                 num_workers=1)

    mnistm_testdata = ImageDataset("../../hw3_data/digits/mnistm/test/",  # snippet truncated in the source
Example #18
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        ])

        test_transforms = transforms.Compose([
            transforms.ToPILImage(),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        ])
        if args.resize is not None:
            train_transforms.transforms.insert(0,
                                               transforms.Resize(args.resize))
            test_transforms.transforms.insert(0,
                                              transforms.Resize(args.resize))

    if args.train:
        train_set = ImageDataset(args.train_manifest, train_transforms)
        train_loader = DataLoader(train_set,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  num_workers=args.num_workers)

        eval_set = ImageDataset(args.val_manifest, test_transforms)
        eval_loader = DataLoader(eval_set,
                                 batch_size=args.batch_size,
                                 shuffle=False,
                                 num_workers=args.num_workers)

        logger.add_general_data(model, train_loader)
        train(model, criterion, optimizer, train_loader, eval_loader,
              args.epochs, lr_scheduler, args.gpu)
        print_training_summary(logger, model.name)
Example #19
    parser.add_argument('-output_path', type=str, default='outputs')
    parser.add_argument('-dataset', type=str, default='Zakrzowek-B')
    parser.add_argument('-threshold', type=float, default=0.5)
    parser.add_argument('-kernel_size', type=int, default=1)

    args = parser.parse_args()

    with tf.Session() as session:
        params_path = Path(__file__).parent / 'params' / ('%s.json' % args.model_name)

        with open(params_path) as f:
            params = json.load(f)

        logging.info('Loading dataset...')

        dataset = ImageDataset([args.dataset], params['temporal_patch_size'])

        logging.info('Restoring model...')

        network = load_model(session, args.model_name)

        logging.info('Running prediction...')

        outputs, _ = predict_dataset(dataset, session, network, args.threshold)

        if args.mode == 'filter':
            logging.info('Filtering images...')

            outputs = filter_dataset(dataset, args.kernel_size, session, network, outputs, args.threshold)

        logging.info('Saving outputs to "%s"...' % args.output_path)
Example #20
# you can overwrite data_path here

output_dir = data_path / 'output'
input_dir = data_path / 'input'

generator = torch.load(model_save_path / "generator.pt")
device = 'cuda' if torch.cuda.is_available() else 'cpu'
dtype = torch.float16

generator.eval()
generator.to(device, dtype)

# TODO batch size, async dataloader
file_paths = [file for file in input_dir.iterdir()]

params = {'batch_size': 1, 'num_workers': 8, 'pin_memory': True}

dataset = ImageDataset(file_paths)
loader = DataLoader(dataset, **params)

# TODO multiprocess and asynchronous writing of files

with torch.no_grad():
    for inputs, names in tqdm(loader):
        inputs = inputs.to(device, dtype)
        outputs = generator(inputs)
        del inputs
        for j in range(len(outputs)):
            write_image_tensor(outputs[j], output_dir / names[j])
        del outputs
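
For the second TODO (asynchronous writing), one minimal approach is to hand the saves to a thread pool so disk I/O overlaps GPU work; a sketch reusing the script's own write_image_tensor helper:

from concurrent.futures import ThreadPoolExecutor

with ThreadPoolExecutor(max_workers=4) as pool, torch.no_grad():
    for inputs, names in tqdm(loader):
        inputs = inputs.to(device, dtype)
        outputs = generator(inputs).cpu()
        for j in range(len(outputs)):
            pool.submit(write_image_tensor, outputs[j], output_dir / names[j])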
Example #21
# In[5]:

# Parameters
n_epochs = 100
batch_size = 8
emsize = 128

# In[29]:

tfms = transforms.Compose([
    transforms.ToTensor(),
    Image,  # fastai v1's Image wrapper (callable on a tensor), judging by load_learner below
    #     partial(crop_pad, size=512, padding_mode="zeros")
])
dataset = ImageDataset(f"{data_dir}/classy_coconut/val", classes, tfms=tfms)
n_classes = len(classes)

imgset = [img for img, label in dataset]
print(len(dataset))

# In[7]:

learner = load_learner("siamese", "export.pkl").to_fp32()

# In[12]:

#  embedlist = _embed_imgset(learner, imgset)
# np.save("classy_coconut_embeddings_val.npz", torch.stack(embedlist).numpy())

# In[33]:
Example #22
from model_util import get_optimizer, loss_fn

import torch
from torch.autograd import Variable
from torch.utils.data import DataLoader
import os
import numpy as np
import time
import matplotlib.pyplot as plt

image_dir = os.path.dirname(__file__) + '/images'
num_augments = 10
batch_size = 5

train_dataset = ImageDataset(root_dir=image_dir,
                             num_augments=num_augments,
                             phase='train',
                             transform=augment_image())

val_dataset = ImageDataset(root_dir=image_dir,
                           num_augments=num_augments,
                           phase='val',
                           transform=augment_image())

test_dataset = ImageDataset(root_dir=image_dir,
                            num_augments=num_augments,
                            phase='test',
                            transform=augment_image())

train_dataloader = DataLoader(dataset=train_dataset,
                              batch_size=batch_size,
                              shuffle=True,