Example #1
0
    def __init__(self,
                 num_channels=3,
                 channels_per_compresion=8,
                 depths=(3, 4, 5, 6),
                 kernel_size=3):
        """Build one ``models.Autoencoder`` per requested depth.

        Args:
            num_channels: number of input image channels forwarded to every
                autoencoder.
            channels_per_compresion: channels added per compression stage
                (spelling kept as-is: it is part of the public signature).
            depths: iterable of depths; one autoencoder is built per entry.
                The default is now a tuple instead of a list to avoid the
                shared-mutable-default-argument pitfall; callers passing a
                list are unaffected since the value is only iterated.
            kernel_size: convolution kernel size forwarded to each model.
        """
        # One autoencoder per depth, all sharing the remaining hyperparameters.
        self.AEs = [
            models.Autoencoder(num_channels=num_channels,
                               channels_per_compresion=channels_per_compresion,
                               depth=depth,
                               kernel_size=kernel_size) for depth in depths
        ]
    def __init__(self, df, size=(28, 28), seed=0, batch_size=8):
        """Initialize the trainer: delegate data handling to the base class,
        then attach the autoencoder model and its reconstruction loss.

        Args:
            df: dataframe handed straight to the base-class constructor.
            size: image size, forwarded to both the base class and the model.
            seed: RNG seed forwarded to the base class.
            batch_size: batch size forwarded to the base class.
        """
        super().__init__(df, size=size, seed=seed, batch_size=batch_size)

        # Autoencoder sized to match the images produced by the base class.
        self.model = models.Autoencoder(size=self.size)
        # Mean-squared reconstruction error (an L1 loss was tried previously).
        self.loss = nn.MSELoss()
Example #3
0
import torch
import models
import image_loader
import torchvision
import numpy as np
import matplotlib.pyplot as plt
import cv2
import os
import pandas as pd

# Restore the trained autoencoder weights from the checkpoint file.
model = models.Autoencoder()
model.load_state_dict(
    torch.load('../trained-models/covid-autoencoder/20201111-144840'))

# Image counts per split. len(os.listdir(...)) replaces the original
# pointless list-comprehension copies of the directory listings.
num_val_images = (len(os.listdir('../images/val/non_covid/')) +
                  len(os.listdir('../images/val/covid/')))
num_train_images = (len(os.listdir('../images/train/non_covid/')) +
                    len(os.listdir('../images/train/covid/')))

# root_dir now uses forward slashes: the original '..\images' only resolved
# on Windows ('\i' is not a valid escape and was kept literally by accident)
# and was inconsistent with the '../images/...' paths used just above.
val_img_folder, val_dataset = image_loader.get_image_dataset(
    root_dir='../images',
    split='val',
    batch_size=num_val_images,
    num_workers=0)
train_img_folder, train_dataset = image_loader.get_image_dataset(
    root_dir='../images',
    split='train',
    batch_size=num_train_images,
    num_workers=0)

# Class labels taken from the validation image folder (presumably a
# torchvision ImageFolder exposing .classes — confirm in image_loader).
class_names = val_img_folder.classes
# NOTE(review): this fragment is Python 2 (statement-form `print` below) and
# starts mid-file: `dataset`, `preprocessing`, `train_test_split`,
# `train_size`, `model_type`, `reduc_dim`, `activation_function`,
# `layers_num`, `optimizer` and `loss` are defined earlier, outside this view.
input_dim = dataset.shape[1]
'''
# _normalize_ the data
dataset = dataset/dataset.max()
# divide the data in training and test
'''
# Rescale every feature column into [0, 1] before splitting.
# `preprocessing` is presumably sklearn.preprocessing — confirm the imports.
min_max_scaler = preprocessing.MinMaxScaler()
dataset = min_max_scaler.fit_transform(dataset)
# Split into train/test; both fractions are derived from `train_size`.
X_train, X_test = train_test_split(dataset, \
    train_size=train_size, test_size=1-train_size)
# ----------------------------------------------------------------------- #

# ------------------------- LOAD MODEL ------------------------- #
# load correct instanced method from the name
print "Loading the model \"%s\"." % (model_type)
# Dynamic dispatch: look up the builder method named by `model_type` on a
# fresh Autoencoder instance, then call it to construct the network.
load_autoencoder = getattr(models.Autoencoder(), model_type)
# store all the correct models
autoencoder = load_autoencoder(input_dim, reduc_dim, \
    activation_function, layers_num)
# -------------------------------------------------------------- #

# --------------------------------- TRAINING --------------------------------- #
# configure the model into a working prototype for training
print "Compiling the model."
# og. optimizer adadelta, now sgd as the paper
# og. loss binary_crossentropy, now mean_squared_error
autoencoder.compile(optimizer=optimizer, loss=loss)
# limit thread usage only at a certain hour
# arg was: datetime.now().hour>=21, now don't do it unless really needed
# Deliberately disabled branch kept for reference; it never runs.
if (False):
    print "Running at night."
Example #5
0
File: train.py — Project: nihofm/ndptmvd
    # NOTE(review): interior of a larger function — `cmdline`, `data` and
    # `models` are bound earlier in the original file, outside this view.
    # Parse CLI options; prefix the run name with network type and loss so
    # checkpoints/logs are self-describing.
    args = cmdline.parse_args()
    args.name = args.type + '_' + args.loss + '_' + args.name
    print('args:', args)

    # check for GPUs
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f'Using device: {device}')
    if torch.cuda.is_available():
        print(f'Num GPUs: {torch.cuda.device_count()}')

    # load data set and setup data loader
    # Substring matching: e.g. 'image' anywhere in args.type selects the first
    # branch, so branch order matters if a type string matches several.
    if 'image' in args.type:
        data_train = data.DataSetTrain(args.x_train, args.y_train,
                                       args.features)
        data_test = data.DataSetTest(args.x_val, args.y_val, args.features)
        model = models.Autoencoder(data_train.input_channels).to(device)
    elif 'dual' in args.type:
        # Dual-frame variant; --big picks the larger architecture.
        data_train = data.DataSetTrain(args.x_train, args.y_train)
        data_test = data.DataSetTest(args.x_val, args.y_val)
        if args.big:
            model = models.AutoencoderDualF24Big().to(device)
        else:
            model = models.AutoencoderDualF24().to(device)
    elif 'temp' in args.type:
        # Temporal variant: dual model wrapped in a temporal adapter,
        # fed by the temporal dataset classes.
        data_train = data.DataSetTrainTemporal(args.x_train, args.y_train)
        data_test = data.DataSetTest(args.x_val, args.y_val, temporal=True)
        model = models.TemporalAdapter(models.AutoencoderDualF24()).to(device)
    else:
        raise ValueError(f'ERROR: Unsupported network/data type: {args.type}')

    # setup optimizer and loss function