Exemple #1
0
def load_model(file_prefix):
    """
    Load a saved model and its associated config file.

    Looks for <file_prefix>.pth (weights) and <file_prefix>.txt (JSON-like
    config) in the ``../data`` directory relative to this file.

    :param file_prefix: file name prefix (without extension) of the model
        and config file name.
    :return: ``(model, cfg)`` tuple if both files exist, ``None`` otherwise.
    """
    dir_path = os.path.dirname(os.path.realpath(__file__))
    # Join path components portably instead of the hard-coded Windows
    # backslash path ('..\\data'), which breaks on POSIX systems.
    predicate = os.path.join(dir_path, '..', 'data', file_prefix)
    full_path_model = predicate + ".pth"
    full_path_cfg = predicate + ".txt"
    # Require BOTH files up front; previously only the weights file was
    # checked and a missing config crashed inside the branch.
    if not (os.path.isfile(full_path_model) and os.path.isfile(full_path_cfg)):
        return None
    # `with` closes the file automatically; the explicit f.close() that was
    # inside the block was redundant.
    with open(full_path_cfg) as f:
        cfg = f.read()
    # The config is stored with single quotes; normalize to valid JSON.
    cfg = cfg.replace("'", '"')
    cfg = json.loads(cfg)
    num_covariates = cfg['num_covariates']
    num_time_idx = cfg['num_time_idx']
    num_targets = cfg['num_targets']
    input_dim = num_time_idx + num_targets + num_covariates
    # create model object
    model_ = model.model(num_lstms=cfg['num_lstms'],
                         input_dim=input_dim,
                         output_dim=cfg['num_targets'],
                         hidden_dim=cfg['hidden_dim'])
    # load weights and populate state dict for the model
    model_.load_state_dict(torch.load(full_path_model))
    return model_, cfg
def execParallelGA(year, region, qntYears=5, times=1):
    """
    Run the parallel/distributed island-model GA against the SC catalog.

    Loads `qntYears` yearly observations starting at `year`, averages their
    bins, and performs `times` GA runs, tagging each resulting model with
    its run index, target year and name.
    """
    observations = []
    means = []
    # Collect one observation per year and remember its bins for the mean.
    for offset in range(qntYears):
        obs = model.loadModelDB(region + 'jmaData', year + offset)
        obs.bins = obs.bins.tolist()
        observations.append(obs)
        means.append(obs.bins)
    mean = np.mean(means, axis=0)

    for run in range(times):
        model_ = model.model()
        model_ = parallelGA.gaModel(NGEN=100,
                                    CXPB=0.9,
                                    MUTPB=0.1,
                                    modelOmega=observations,
                                    year=year + qntYears,
                                    region=region,
                                    mean=mean,
                                    FREQ=10,
                                    tournsize=2,
                                    n_aval=50000)
        # Annotate the run so it can be identified later.
        model_.executionNumber = run
        model_.year = year + qntYears
        model_.modelName = region + 'parallelGA'
Exemple #3
0
def main():
    """
    Evaluate every checkpoint in ./trained_models on the SST test set.

    The model class is looked up from the checkpoint file name with the
    last 9 characters stripped (presumably a fixed suffix such as
    '_xxx.ckpt' -- TODO confirm the naming convention).
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    TEXT = data.Field()
    LABEL = data.Field(sequential=False, dtype=torch.long)

    train, val, test = datasets.SST.splits(TEXT,
                                           LABEL,
                                           fine_grained=True,
                                           train_subtrees=False)

    LABEL.build_vocab(train)

    train_iter, val_iter, test_iter = data.BucketIterator.splits(
        (train, val, test), batch_size=8, device=device)
    TEXT.build_vocab(train, vectors=Vectors(name="vector.txt", cache="./data"))
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.to(device)
    INPUT_DIM = len(TEXT.vocab)
    EMBEDDING_DIM = 300
    HIDDEN_DIM = 768
    OUTPUT_DIM = 5
    for file in os.listdir("./trained_models"):
        print(file)
        checkpoint = torch.load("./trained_models/" + file)
        print(checkpoint)
        for k in checkpoint:
            print(k)
        # Bug fix: load_state_dict() returns a key-matching report (or
        # None on older torch), NOT the module, so its result must not be
        # assigned to `model`. Build the model first, then load weights.
        model = md.model(file[:-9])(INPUT_DIM, EMBEDDING_DIM, HIDDEN_DIM,
                                    OUTPUT_DIM)
        model.load_state_dict(checkpoint)
        model = model.to(device)
        # Test
        test_loss, test_acc = solver.evaluate(model, test_iter, criterion)
        print(file[:-9] +
              f" Test Loss: {test_loss:.3f} | Test Acc: {test_acc*100:.2f}%")
def execParallelGA(year, region, qntYears=5, times=10):
    """
    Run the parallel/distributed island-model GA (P_AVR variant) with the
    SC catalog, enriching each yearly observation with Poisson values read
    from the Zona3 reference files.
    """
    observations = []
    means = []
    for offset in range(qntYears):
        obs = model.loadModelDB(region + 'jmaData', year + offset)
        # Pull the Poisson reference values for the same year and shift
        # them by +1 before attaching them to the observation.
        poisson_src = model.loadModelFromFile(
            '../../Zona3/realSCwithP_AVR/' + region + 'real' + "_" +
            str(year + offset) + '.txt')
        poisson_src.values4poisson = [v + 1 for v in poisson_src.values4poisson]
        obs.values4poisson = poisson_src.values4poisson
        del poisson_src
        obs.bins = obs.bins.tolist()
        observations.append(obs)
        means.append(obs.bins)
    mean = np.mean(means, axis=0)

    for run in range(times):
        model_ = model.model()
        model_ = parallelGAModelP_AVR.gaModel(NGEN=10,
                                              CXPB=0.9,
                                              MUTPB=0.1,
                                              modelOmega=observations,
                                              year=year + qntYears,
                                              region=region,
                                              mean=mean,
                                              n_aval=50)
        # Annotate the run so it can be identified later.
        model_.executionNumber = run
        model_.year = year + qntYears
        model_.modelName = region + 'parallelGA'
        parallelGA_ = model.loadModelDB(region + 'parallelGA', year)
Exemple #5
0
def train(args):
    """
    Train an age-regression head on top of a pre-trained face encoder
    using the IMDB-WIKI (imdb_crop) metadata.

    :param args: parsed CLI arguments; uses pre_trained, batch_size,
        epochs, img_size, log_step and save_path.
    """
    # Pre-processing: the .mat file stores parallel metadata arrays.
    imdbMat = scipy.io.loadmat('imdb_crop/imdb.mat')
    place = imdbMat['imdb'][0][0]
    where = 'imdb_crop'
    img_loc = []
    corr_ages = []

    # Iterate over every record instead of the hard-coded 460723, so the
    # code also works with partial or newer dumps of the dataset.
    for i in range(len(place[0][0])):
        # Birth date is stored as a day count; /365 yields an approximate
        # birth year (TODO confirm the serial-date epoch/offset).
        bYear = int(place[0][0][i] / 365)
        taken = place[1][0][i]  # year the photo was taken
        path = place[2][0][i][0]
        age = taken - bYear
        img_loc.append(os.path.join(where, path))
        corr_ages.append(age)

    df = pd.DataFrame(img_loc, columns=['Image Location'])
    df['Age'] = corr_ages

    # Training
    if args.pre_trained == 'facenet':
        from models.Face_recognition import FR_model
        FR = FR_model()
        Model = model()
        Model.compile(loss='mean_absolute_error', optimizer='adam')
        # training loop
        length = len(df)
        print("length are {}".format(length))
        assert length > 0
        batch_size = args.batch_size
        n_batches = length // batch_size
        epochs = args.epochs
        # int(...) instead of the C-style (int)(...) cast.
        iters = int(epochs * n_batches)
        assert iters > 0
        print("iters are {}".format(iters))
        for i in range(iters):
            X, Y = get_images(batch_size, df['Image Location'], df['Age'],
                              (args.img_size, args.img_size))
            X = np.array(X)
            Y = np.array(Y)
            # Encode the raw images into face embeddings.
            X = FR(X)

            assert X.shape == (batch_size, 128), \
                'expected shape {} O/p shape {}'.format((batch_size, 128), X.shape)
            history = Model.fit(X, Y, batch_size, 1, verbose=0)
            if (i + 1) % args.log_step == 0:
                print("Iters [{}/{}] Loss {} Batch size {}   ".format(
                    i + 1, iters, history.history['loss'], args.batch_size))

        Model.save(args.save_path)
Exemple #6
0
# NOTE(review): top-level training-setup fragment. It relies on `cfg`,
# `device`, `model`, `optim`, `dataset` and `DataLoader` being defined
# earlier, and the trailing DataLoader(...) call is cut off mid-argument
# list (the snippet is truncated).
num_covariates = cfg['num_covariates']
num_time_idx = cfg['num_time_idx']
num_targets = cfg['num_targets']
# Network input = time index + target(s) + covariates, concatenated.
input_dim = num_time_idx + num_targets + num_covariates
# Total context window size used for training. The context window consists of 1. conditioning_context where input
# data points are available and network predictions are conditioned on actual input data, and 2. prediction_context,
# where the network predictions are conditioned on network output at the previous step. Covariates are assumed
# to be available for the entire context window
ctx_win_len = cfg['ctx_win_len']
cond_win_len = cfg['cond_win_len']
# Prediction window is what remains after conditioning, minus one step
# (presumably for the shifted target -- TODO confirm).
pred_win_len = ctx_win_len - cond_win_len - 1
batch_size = cfg['batch_size']

# NOTE(review): rebinding `model` (the imported module) to the instance
# shadows the module name from this point on.
model = model.model(num_lstms=cfg['num_lstms'],
                    input_dim=input_dim,
                    output_dim=cfg['num_targets'],
                    hidden_dim=cfg['hidden_dim']).to(device)

criterion = torch.nn.MSELoss()
optimizer = optim.Adam(model.parameters(), cfg['lr'])
# Decay the learning rate by lr_gamma every lr_step_size steps.
scheduler = optim.lr_scheduler.StepLR(optimizer,
                                      step_size=cfg['lr_step_size'],
                                      gamma=cfg['lr_gamma'])

train_sampler, test_sampler = dataset.get_train_test_samplers(
    cfg['train_test_split'])

# A sampler is mutually exclusive with shuffle, hence shuffle=False.
train_dataloader = DataLoader(dataset,
                              batch_size=batch_size,
                              sampler=train_sampler,
                              shuffle=False,
Exemple #7
0
def wrapper(param, data_x, data_y, length_x, learning_rate, lr_gamma,
            hidden_dim, layers):
    """
    Run a full train/validate/test cycle for one hyper-parameter setting.

    Trains ``model(param, ...)`` for ``param['n_epochs']`` epochs, keeps
    the weights with the lowest validation loss in ``<out>/temp/model.pt``,
    reloads them for testing, and writes loss curves, plots and a
    test-vs-real CSV into a run-specific output directory.

    :param param: run-configuration dict (device, model name, brain region,
        split sizes/rates, timestamp, etc.).
    :param data_x: input sequences, indexed [sample, time, feature].
    :param data_y: one target value per sample.
    :param length_x: true (unpadded) sequence length per sample, used for
        pack_padded_sequence on the recurrent models.
    :param learning_rate: Adam learning rate.
    :param lr_gamma: StepLR decay factor (applied every 100 epochs).
    :param hidden_dim: hidden size forwarded to the model.
    :param layers: layer count forwarded to the model.
    """
    device = param['device']
    model_name = param['model']
    brain = param['brain_region']
    input_dim = param['region_n']
    n_epochs = param['n_epochs']
    minmax_y = param['minmax_y']
    rate_tr = param['rate_train']
    rate_va = param['rate_valid']
    rate_te = param['rate_test']
    now_time = param['present_time']
    train_num = param['number_train']
    valid_num = param['number_valid']
    test_num = param['number_test']
    layer_rate = learning_rate
    total_num = len(data_y)

    # The split sizes must be divisible by their batch rates; otherwise the
    # integer-division loops below silently drop the remainder samples.
    if train_num % rate_tr != 0:
        print('Please reset rate_train')
    if valid_num % rate_va != 0:
        print('Please reset rate_valid')
    if test_num % rate_te != 0:
        print('Please reset rate_test')

    # Per-run output directory named after the hyper-parameters, with a
    # temp/ subdirectory for the best-so-far checkpoint.
    cwd = os.getcwd()
    out_fname = f'{now_time}_h_{hidden_dim}_l_{layers}_lg_{lr_gamma}' \
                f'_n_{n_epochs}_lr{layer_rate}_model{model_name}'
    out_path = os.path.join(cwd, out_fname)
    safe_make_dir(out_path)
    temp_path = os.path.join(out_path, 'temp')
    safe_make_dir(temp_path)

    start = time.time()  # Start Learning
    print("Start Learning " + out_fname)
    output_dim = 1

    loss_list = []
    step_list = []
    epoch_list = []

    # Hemisphere-only runs use half the regions as input features.
    if brain == 'right' or brain == 'left':
        input_dim = input_dim // 2

    mynet = model(param, input_dim, hidden_dim, output_dim, layers, device)

    criterion = nn.MSELoss().to(device)
    optimizer = torch.optim.Adam(mynet.parameters(), lr=layer_rate)
    lr_sche = torch.optim.lr_scheduler.StepLR(optimizer,
                                              step_size=100, gamma=lr_gamma)

    # Contiguous split: [0, train_num) train, then valid_num validation;
    # the test slice is taken later from train_num+valid_num onward.
    train_xdata = data_x[0:train_num, :, :]
    train_ydata = data_y[0:train_num]
    train_length_x = length_x[0:train_num]
    valid_xdata = data_x[train_num:train_num + valid_num, :, :]
    valid_ydata = data_y[train_num:train_num + valid_num]
    valid_length_x = length_x[train_num:train_num + valid_num]
    total_loss_valid_min = np.Inf

    for i in range(n_epochs):
        # Train
        mynet.train()
        loss = 0

        for tr in range(int(train_num / rate_tr)):
            train_x_tensor, train_y_tensor, train_length_tensor = train(
                device, tr*rate_tr, rate_tr, train_xdata, train_ydata,
                train_length_x)
            # Recurrent models consume packed sequences; the FC model
            # takes the raw (padded) tensor.
            if model_name != 'FC':
                train_x_tensor = pack_padded_sequence(
                    train_x_tensor, train_length_tensor,
                    batch_first=True, enforce_sorted=False)
            optimizer.zero_grad()

            outputs = mynet(train_x_tensor)
            loss_train = criterion(outputs, train_y_tensor)
            loss_train.backward()

            optimizer.step()
            # float(...) detaches the scalar so the graph is not retained.
            loss += float(loss_train)

        epoch_list.append(i)
        loss_list.append(loss)
        step_list.append('train')

        # Validation
        mynet.eval()
        valid_loss = 0

        for va in range(int(valid_num / rate_va)):
            valid_x_tensor, valid_y_tensor, valid_length_tensor = \
                valid(device, va*rate_va, rate_va, valid_xdata,
                      valid_ydata, valid_length_x)
            if model_name != 'FC':
                valid_x_tensor = pack_padded_sequence(
                    valid_x_tensor, valid_length_tensor,
                    batch_first=True, enforce_sorted=False)

            valid_result = mynet(valid_x_tensor)
            loss_valid = criterion(valid_result, valid_y_tensor)
            valid_loss = valid_loss + loss_valid

        epoch_list.append(i)
        loss_list.append(valid_loss.item())
        step_list.append('validation')

        # Checkpoint whenever the validation loss matches or improves on
        # the best seen so far.
        if valid_loss.item() <= total_loss_valid_min:
            torch.save(mynet.state_dict(), os.path.join(temp_path, 'model.pt'))
            total_loss_valid_min = valid_loss.item()

        lr_sche.step()

    epoch_arr = np.array(epoch_list)
    loss_arr = np.array(loss_list)
    step_arr = np.array(step_list)

    # Write loss values in csv file
    write_loss(epoch_arr, loss_arr, step_arr, out_path)

    # Plot train and validation losses
    plot_train_val_loss(out_path, out_fname, dpi=800,
                        yscale='log', ylim=[0.0001, 10])

    end = time.time()  # Learning Done
    print(f"Learning Done in {end-start}s")

    # Test: reload the best-validation weights before evaluating.
    mynet.load_state_dict(torch.load(os.path.join(temp_path, 'model.pt')))

    mynet.eval()
    with torch.no_grad():
        test_x_tensor, test_y_tensor, test_length_tensor = get_tensor(
            device, data_x, data_y, length_x, train_num + valid_num, total_num)
        if model_name != 'FC':
            test_x_tensor = pack_padded_sequence(
                test_x_tensor, test_length_tensor,
                batch_first=True, enforce_sorted=False)

        test_result = mynet(test_x_tensor)
        test_loss = criterion(test_result, test_y_tensor)
        print(f"Test Loss: {test_loss.item()}")
    plot_result(test_y_tensor, test_result, minmax_y, out_path, out_fname)

    # De-normalize predictions and targets back to original units before
    # dumping the comparison CSV (last column only).
    real_arr = normalize_tensor(test_y_tensor, minmax_y)[:, -1]
    result_arr = normalize_tensor(test_result, minmax_y)[:, -1]
    # NOTE(review): the column labels look swapped -- 'test_age' holds the
    # real targets and 'real_age' the model output; confirm downstream use.
    df_result = pd.DataFrame({'test_age': real_arr, 'real_age': result_arr})
    df_result.to_csv(os.path.join(out_path, 'test_vs_real.csv'))
Exemple #8
0
def main(argv):
    """
    Colorize one image (-i) or a folder of images (-p) with a trained
    network.

    Options: -m selects the architecture, -w the weights file, -l enables
    the Lab color space, -d sets the dropout rate used at training time,
    -c marks the model as a classifier, -t the annealed-mean temperature,
    -o an output directory for the colorized PNGs.
    """
    data_path = None
    input_image = None
    output = None
    weight_path = None
    mode = 5
    drop_rate = 0
    lab = s.lab
    classification = False
    temp = .4
    try:
        opts, args = getopt.getopt(
            argv, "w:p:b:m:ld:ct:i:o:",
            ["weight-path=", "datapath=", 'model=', 'lab', 'drop-rate=',
             'input=', 'output='])
    except getopt.GetoptError as error:
        print(error)
        print( 'demo.py -w <path to weights file> -p <path to folder of images> OR -i <path to single image> -l <no argument. use if lab should be used>\
            -d <amount of dropout used in model> -c <no argument. Use if model is classifier> -t <temperature for annealed mean> -o <output path for images>')
        sys.exit(2)
    print("opts", opts)
    for opt, arg in opts:
        if opt in ("-w", "--weight-path"):
            weight_path = arg
        elif opt in ("--datapath", "-p"):
            data_path = arg
        elif opt == '-m':
            # map the architecture aliases onto the numeric mode
            if arg in ('custom', '0'):
                mode = 0
            elif arg in ('u', '1', 'unet'):
                mode = 1
            elif arg in ('ende', '2'):
                mode = 2
            elif arg in ('richzhang', 'classende', '3'):
                mode = 3
            elif arg in ('colorunet', 'cu', '4'):
                mode = 4
            elif arg in ('mu', '5', 'middle'):
                mode = 5
        elif opt in ('-l', '--lab'):
            lab = True
        elif opt in ("-d", "--drop-rate"):
            drop_rate = float(arg)
        elif opt == '-c':
            # classifier models always work in Lab space
            classification = True
            lab = True
        elif opt == '-t':
            temp = float(arg)
        elif opt in ('-i', '--input'):
            input_image = arg
        elif opt in ('-o', '--output'):
            output = arg

    if data_path is None and input_image is None:
        print('Please select an image or folder')
        sys.exit()
    trafo = transforms.Compose([transforms.Grayscale(3 if lab else 1),
                                transforms.Resize((96, 96))])
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    if data_path is not None:
        dataset = ImageDataset(data_path, lab=lab, pretrafo=trafo)
        loader = torch.utils.data.DataLoader(dataset, batch_size=1,
                                             shuffle=False, num_workers=0)
    if input_image is not None:
        img = trafo(Image.open(input_image))
        if lab:
            # keep only the L channel, shifted to be centered around 0
            img = color.rgb2lab(np.asarray(img) / 255)[..., :1] \
                - np.array([50])[None, None, :]
        # mimic the DataLoader interface: a list of (tensor, name) pairs
        loader = [(transforms.ToTensor()(img)[None, ...].float(), input_image)]

    # output channels: 150 class bins (classifier), a/b (Lab) or RGB
    classes = (150 if classification else 2) if lab else 3

    # define model
    UNet = None
    zoom = False
    if mode == 0:
        UNet = model(col_channels=classes)
    elif mode == 1:
        UNet = unet(drop_rate=drop_rate, classes=classes)
    elif mode == 2:
        UNet = generator(drop_rate, classes)
    elif mode == 3:
        UNet = richzhang(drop_rate, classes)
        zoom = True
    elif mode == 4:
        UNet = color_unet(True, drop_rate, classes)
    elif mode == 5:
        UNet = middle_unet(True, drop_rate, classes)
    # load weights
    try:
        UNet.load_state_dict(torch.load(weight_path, map_location=device))
        print("Loaded network weights from", weight_path)
    except FileNotFoundError:
        print("Did not find weight files.")
        sys.exit()
    outpath = None
    UNet.to(device)
    UNet.eval()
    with torch.no_grad():
        for i, (X, name) in enumerate(loader):
            X = X.to(device)
            unet_col = UNet(X)
            col = show_colorization(unet_col, original=X, lab=lab,
                                    cl=classification, zoom=zoom, T=temp,
                                    return_img=output is not None)
            if output:
                try:
                    fp, f = os.path.split(name)
                except TypeError:
                    # the DataLoader yields the name wrapped in a 1-tuple
                    fp, f = os.path.split(name[0])
                # Bug fix: use splitext instead of f.split('.') -- a file
                # name containing more than one dot made the 2-way
                # unpacking raise ValueError.
                stem, _ = os.path.splitext(f)
                f = stem + '_color.png'
                # NOTE(review): isdir(basename(output)) looks like it was
                # meant to be dirname(output) -- confirm intent.
                outpath = output if os.path.isdir(output) or os.path.isdir(
                    os.path.basename(output)) else fp
                Image.fromarray(toInt(col[0])).save(os.path.join(outpath, f))
    if output:
        print('Finished colorization. Go to "%s" to see the colorized version(s) of the image(s)'
              % os.path.realpath(outpath))
Exemple #9
0
def main():
    """
    Train with a network-slimming L1 penalty on BN scale factors and a
    warm-up learning rate; evaluate on train/test sets and checkpoint
    every SAVE_FREQ epochs.

    Relies on module-level configuration (save_dir, cfg_dir, data_dir,
    resume, START_EPOCH, WD, LAMBDA1, SAVE_FREQ) and helpers (mk_save,
    init_log, dataloader, params_count, params_extract, warm_lr,
    L1_penalty, progress_bar, test).
    """
    start_epoch = 1
    saver_dir = mk_save(save_dir, cfg_dir)
    logging = init_log(saver_dir)
    _print = logging.info

    # read dataset
    trainset, testset, trainloader, testloader = dataloader(data_dir, 0)

    # define model
    net = model.model()

    # resume from checkpoint if configured
    if resume:
        ckpt = torch.load(resume)
        net_dict = net.state_dict()
        # only restore weights whose names still exist in the current net
        pre_dict = {
            k: v
            for k, v in ckpt['state_dict'].items() if k in net_dict
        }
        net_dict.update(pre_dict)
        net.load_state_dict(net_dict)
        start_epoch = ckpt['epoch'] + 1
        print('resume', start_epoch)
        # NOTE(review): the checkpoint epoch is immediately overridden by
        # the module-level START_EPOCH constant -- confirm this is intended.
        start_epoch = START_EPOCH

    criterion = nn.CrossEntropyLoss().cuda()
    kd = nn.KLDivLoss().cuda()

    # report number of parameters
    params_count(net)

    # parameters subject to BN slimming (L1 penalty)
    slim_params = params_extract(net)

    net = DataParallel(net.cuda())

    for epoch in range(start_epoch, 500):
        # ----------------------- train the model -----------------------
        _print('--' * 50)
        net.train()
        for i, data in enumerate(trainloader):
            # warm up learning rate
            lr = warm_lr(i, epoch, trainloader)

            # A fresh SGD optimizer is built every step so the warmed-up
            # lr takes effect. NOTE(review): this also resets the momentum
            # buffers each step -- confirm that is intended.
            optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad,
                                               net.parameters()),
                                        lr=lr,
                                        momentum=0.9,
                                        weight_decay=WD,
                                        nesterov=True)

            img, label = data[0].cuda(), data[1].cuda()
            batch_size = img.size(0)
            optimizer.zero_grad()

            logits = net(img)

            # L1 penalty on the BN scale factors (network slimming)
            L1_norm = sum([L1_penalty(m).cuda() for m in slim_params])
            loss = criterion(logits, label) + LAMBDA1 * L1_norm
            loss.backward()
            optimizer.step()

            progress_bar(i,
                         len(trainloader),
                         loss.item(),
                         L1_norm,
                         lr,
                         msg='train')

        # ----------------- evaluate net and save model -----------------
        if epoch % SAVE_FREQ == 0:
            # evaluate net on train set
            net.eval()
            train_loss, train_correct, total = test(trainloader)

            train_acc = float(train_correct) / total
            train_loss = train_loss / total
            # Bug fix: removed `total_loss = total_loss / total`, which
            # referenced an undefined name (NameError on the first
            # evaluation epoch) and whose result was never used.

            _print(
                'epoch:{} - train_loss: {:.4f}, train acc: {:.4f}, L1:{:.4f}, lr:{:.6f}, total sample: {}'
                .format(epoch, train_loss, train_acc, L1_norm, lr, total))

            # evaluate net on test set
            test_loss, test_correct, total = test(testloader)

            test_acc = float(test_correct) / total
            test_loss = test_loss / total
            _print(
                'epoch:{} - test loss: {:.4f} and test acc: {:.4f} total sample: {}'
                .format(epoch, test_loss, test_acc, total))

            # save model
            net_state_dict = net.module.state_dict()
            if not os.path.exists(save_dir):
                os.mkdir(save_dir)
            torch.save(
                {
                    'epoch': epoch,
                    'train_loss': train_loss,
                    'train_acc': train_acc,
                    'test_loss': test_loss,
                    'test_acc': test_acc,
                    'L1': L1_norm,
                    'state_dict': net_state_dict
                }, os.path.join(save_dir, '%03d.ckpt' % epoch))
    print('finishing training')
def main(argv):
    """
    Train a colorization network (L1 regression loss) on CIFAR / Places /
    STL-10.

    Defaults come from the settings module `s` and can be overridden on
    the command line; per-weights-name hyper-parameters are persisted in
    a JSON model-description file so interrupted runs resume with the
    same configuration.
    """
    # setting argument defaults
    mbsize = s.batch_size
    report_freq = s.report_freq
    weight_path = s.weights_path
    weights_name = s.weights_name
    lr = s.learning_rate
    save_freq = s.save_freq
    mode = 3
    epochs = s.epochs
    beta1, beta2 = s.betas
    infinite_loop = s.infinite_loop
    data_path = s.data_path
    drop_rate = 0
    lab = s.lab
    load_list = s.load_list
    # renamed from `help` to avoid shadowing the builtin
    usage = 'train_regression.py -b <batch size> -e <amount of epochs to train. standard: infinite> -r <report frequency> -w <path to weights folder> \
            -n <name> -s <save freq.> -l <learning rate> -p <path to data set> -d <dropout rate> -m <mode: differnet models> --beta1 <beta1 for adam>\
            --beta2 <beta2 for adam> --lab <No argument. If used lab colorspace is used> \
            --lambda <hyperparameter for class weights>'

    try:
        # Bug fix: the original list was missing a comma after
        # 'drop_rate=', so implicit string concatenation produced the
        # bogus long option 'drop_rate=beta1=' and --beta1 was rejected.
        opts, args = getopt.getopt(argv, "he:b:r:w:l:s:n:m:p:d:i:", [
            'epochs=', "mbsize=", "report-freq=", 'weight-path=', 'lr=',
            'save-freq=', 'weight-name=', 'mode=', 'data_path=', 'drop_rate=',
            'beta1=', 'beta2=', 'lab', 'image-loss-weight=', 'load-list'
        ])
    except getopt.GetoptError:
        print(usage)
        sys.exit(2)
    print("opts", opts)
    for opt, arg in opts:
        if opt == '-h':
            print(usage)
            sys.exit()
        elif opt in ("-b", "--mbsize"):
            mbsize = int(arg)
        elif opt in ("-e", "--epochs"):
            epochs = int(arg)
            infinite_loop = False
        elif opt in ('-r', '--report-freq'):
            report_freq = int(arg)
        elif opt in ("-w", "--weight-path"):
            weight_path = arg
        elif opt in ("-n", "--weight-name"):
            weights_name = arg
        elif opt in ("-s", "--save-freq"):
            save_freq = int(arg)
        elif opt in ("-l", "--lr"):
            lr = float(arg)
        elif opt == '-m':
            if arg in ('custom', '0'):
                mode = 0
            elif arg in ('u', '1', 'unet'):
                mode = 1
            elif arg in ('ende', '2'):
                mode = 2
            elif arg in ('color', '3', 'cu'):
                mode = 3
        elif opt in ("-p", "--data_path"):
            data_path = str(arg)
        elif opt in ("-d", "--drop_rate"):
            drop_rate = float(arg)
        elif opt == '--beta1':
            beta1 = float(arg)
        elif opt == '--beta2':
            beta2 = float(arg)
        elif opt == '--lab':
            lab = True
        elif opt == '--load-list':
            # Bug fix: was `opt in ('--load-list')` -- `in` against a plain
            # string is a substring test, not tuple membership.
            load_list = True

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # pick input resolution from the dataset implied by the path
    dataset = None
    if 'cifar' in data_path:
        in_size = 32
        dataset = 0
    elif 'places' in data_path:
        in_size = 224
        dataset = 1
    elif 'stl' in data_path:
        in_size = 96
        dataset = 2
    in_shape = (3, in_size, in_size)

    #out_shape=(s.classes,32,32)
    betas = (beta1, beta2)
    weight_path_ending = os.path.join(weight_path, weights_name + '.pth')

    loss_path_ending = os.path.join(weight_path,
                                    weights_name + "_" + s.loss_name)

    trainset = load_trainset(data_path, lab=lab, load_list=load_list)
    trainloader = torch.utils.data.DataLoader(trainset,
                                              batch_size=mbsize,
                                              shuffle=True,
                                              num_workers=2)

    print("NETWORK PATH:", weight_path_ending)
    # define output channels of the model (a/b for Lab, RGB otherwise)
    classes = 2 if lab else 3
    # define model
    UNet = None

    if mode == 0:
        UNet = model(col_channels=classes)
    elif mode == 1:
        UNet = unet(drop_rate=drop_rate, classes=classes)
    elif mode == 2:
        UNet = generator(drop_rate, classes)
    elif mode == 3:
        UNet = color_unet(True, drop_rate, classes)
    # load weights
    try:
        UNet.load_state_dict(
            torch.load(weight_path_ending, map_location=device))
        print("Loaded network weights from", weight_path)
    except FileNotFoundError:
        print("Initialize new weights for the generator.")

    UNet.to(device)

    # save the hyperparameters to a JSON-file for better organization
    model_description_path_ending = os.path.join(weight_path,
                                                 s.model_description_name)
    # initialize model dict
    try:
        with open(model_description_path_ending, "r") as file:
            model_dict = json.load(file)
    except FileNotFoundError:
        model_dict = {}

    prev_epochs = 0
    # save settings in dict if new weights are being initialized
    if weights_name not in model_dict:
        model_dict[weights_name] = {
            "loss_name": loss_path_ending,
            "epochs": 0,
            "batch_size": mbsize,
            "lr": lr,
            "lab": lab,
            "betas": betas,
            "model": ['custom', 'unet', 'encoder-decoder', 'color-unet'][mode]
        }
    else:
        # load specified parameters from model_dict
        params = model_dict[weights_name]
        mbsize = params['batch_size']
        betas = params['betas']
        lr = params['lr']
        lab = params['lab']
        loss_path_ending = params['loss_name']
        # memorize how many epochs already were trained if we continue training
        prev_epochs = params['epochs'] + 1

    # optimizer
    optimizer_g = optim.Adam(UNet.parameters(), lr=lr, betas=betas)
    # l1 loss
    l1loss = nn.L1Loss().to(device)
    loss_hist = []

    UNet.train()
    # ITU-R BT.601 luma weights used to build the grayscale input below
    gray = torch.tensor([0.2989, 0.5870, 0.1140])[:, None, None].float()
    # run over epochs (endless when no -e flag was given)
    for e in (range(prev_epochs, prev_epochs +
                    epochs) if not infinite_loop else count(prev_epochs)):
        g_running = 0
        # load batches
        for i, batch in enumerate(trainloader):
            if dataset == 0:  # cifar 10
                (image, _) = batch
            elif dataset in (1, 2):  # places and stl 10
                image = batch

            X = None
            # differentiate between the two available color spaces RGB and Lab
            if lab:
                if dataset == 0:  # cifar 10
                    image = np.transpose(image, (0, 2, 3, 1))
                    image = np.transpose(color.rgb2lab(image), (0, 3, 1, 2))
                    image = torch.from_numpy(
                        (image +
                         np.array([-50, 0, 0])[None, :, None, None])).float()
                # X is the lightness channel; the target is the a/b pair
                X = torch.unsqueeze(image[:, 0, :, :], 1).to(device)
                image = image[:, 1:, :, :].to(device)
            else:
                # convert to grayscale image
                # using the matlab formula: 0.2989 * R + 0.5870 * G + 0.1140 * B and load data to gpu
                X = (image.clone() * gray).sum(1).to(device).view(
                    -1, 1, *in_shape[1:])
                image = image.float().to(device)
            # ----------------------- Unet optimization -----------------------
            # clear gradients
            optimizer_g.zero_grad()
            # generate colorized version with unet
            unet_col = None
            if mode == 0:
                # the custom model expects a 3-channel input
                unet_col = UNet(torch.stack((X, X, X), 1)[:, :, 0, :, :])
            else:
                unet_col = UNet(X)
            # calculate how close the generated pictures are to the ground truth
            loss_g = l1loss(unet_col, image)
            # backpropagation
            loss_g.backward()
            optimizer_g.step()

            g_running += loss_g.item()
            loss_hist.append([e, i, loss_g.item()])

            # report running loss
            if (i + len(trainloader) * e) % report_freq == report_freq - 1:
                print('Epoch %i, batch %i: \tunet loss=%.2e' %
                      (e + 1, i + 1, g_running / report_freq))
                g_running = 0

            if s.save_weights and (
                    i + len(trainloader) * e) % save_freq == save_freq - 1:
                # save parameters
                try:
                    torch.save(UNet.state_dict(), weight_path_ending)
                except FileNotFoundError:
                    os.makedirs(weight_path)
                    torch.save(UNet.state_dict(), weight_path_ending)
                print("Parameters saved")

                if s.save_loss:
                    # append loss history to file (with-block instead of
                    # manual open/close so the handle is released on error)
                    try:
                        with open(loss_path_ending, 'a') as f:
                            np.savetxt(f, loss_hist, '%e')
                    except FileNotFoundError:
                        os.makedirs(s.loss_path)
                        np.savetxt(loss_path_ending, loss_hist, '%e')
                    loss_hist = []

        # update epoch count in dict after each epoch
        model_dict[weights_name]["epochs"] = e
        # save it to file
        try:
            with open(model_description_path_ending, "w") as file:
                json.dump(model_dict, file, sort_keys=True, indent=4)
        except Exception:
            # best-effort save; was a bare `except:` which also swallowed
            # SystemExit/KeyboardInterrupt
            print('Could not save to model dictionary (JSON-file)')
Exemple #11
0
def main(argv):
    """Colorization test/inference entry point.

    Parses command-line options, loads the requested test dataset and model
    architecture, restores trained weights, and visualizes the (gray/L input,
    model output, ground truth) for batches of test images.

    :param argv: command-line argument list (typically ``sys.argv[1:]``).
    """
    data_path = s.data_path
    weight_path = s.weights_path
    mode = 1
    drop_rate = 0
    lab = s.lab
    classification = False
    temp = .4
    try:
        # Fix: '-h' is a no-argument flag, so it must not be followed by ':'
        # in the optstring (was "h:..."), otherwise the option following -h
        # would be swallowed as its argument.
        opts, args = getopt.getopt(argv, "hw:p:b:m:ld:ct:", [
            "help", "weight-path=", "datapath=", 'model=', 'lab', 'drop-rate='
        ])
    except getopt.GetoptError as error:
        print(error)
        print(
            'test.py -w <path to weights file> -p <path to dataset> -l <no argument. use if lab should be used> -m <mode: different models>\
            -d <amount of dropout used in model> -c <no argument. Use if model is classifier> -t <temperature for annealed mean> -o <output path for images>'
        )
        sys.exit(2)
    print("opts", opts)
    for opt, arg in opts:
        # Fix: also honor the declared long option --help (was '-h' only).
        if opt in ('-h', '--help'):
            print('test.py -i <Boolean> -s <Boolean>')
            sys.exit()
        elif opt in ("-w", "--weight-path"):
            weight_path = arg
        elif opt in ("--datapath", "-p"):
            data_path = arg
        elif opt in ("--batchnorm", "-b"):
            # NOTE(review): parsed but currently unused by any model below.
            batch_norm = arg in ["True", "true", "1"]
        elif opt == '-m':
            # map the model alias to an integer mode understood below
            if arg in ('custom', '0'):
                mode = 0
            elif arg in ('u', '1', 'unet'):
                mode = 1
            elif arg in ('ende', '2'):
                mode = 2
            elif arg in ('richzhang', 'classende', '3'):
                mode = 3
            elif arg in ('colorunet', 'cu', '4'):
                mode = 4
            elif arg in ('mu', '5', 'middle'):
                mode = 5
        elif opt in ('-l', '--lab'):
            lab = True
        elif opt in ("-d", "--drop-rate"):
            drop_rate = float(arg)
        elif opt == '-c':
            # classification implies working in Lab color space
            classification = True
            lab = True
        elif opt == '-t':
            temp = float(arg)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Resolve dataset id and input resolution from the data path.
    dataset = None
    if data_path == './cifar-10':
        in_size = 32
        dataset = 0
    elif 'places' in data_path:
        in_size = 224
        dataset = 1
    elif 'stl' in data_path:
        in_size = 96
        dataset = 2
    else:
        # Fix: previously fell through with in_size/dataset undefined,
        # crashing later with NameError; fail fast with a clear message.
        print("Unrecognized dataset path:", data_path)
        sys.exit(2)
    in_shape = (3, in_size, in_size)

    trainset = load_trainset(data_path, train=False, lab=lab)
    trainloader = torch.utils.data.DataLoader(
        trainset,
        batch_size=3,
        shuffle=True,
        num_workers=2 if dataset in (0, 1) else 0)
    print("Loaded dataset from", data_path)
    # output channels: 150 class bins for classification, a/b for lab, RGB otherwise
    classes = (150 if classification else 2) if lab else 3

    # instantiate the requested architecture
    UNet = None
    zoom = False
    if mode == 0:
        UNet = model(col_channels=classes)
    elif mode == 1:
        UNet = unet(drop_rate=drop_rate, classes=classes)
    elif mode == 2:
        UNet = generator(drop_rate, classes)
    elif mode == 3:
        UNet = richzhang(drop_rate, classes)
        zoom = True  # richzhang predicts at reduced resolution; upscale on display
    elif mode == 4:
        UNet = color_unet(True, drop_rate, classes)
    elif mode == 5:
        UNet = middle_unet(True, drop_rate, classes)
    # load trained weights (best effort: continue with random weights if absent)
    try:
        UNet.load_state_dict(torch.load(weight_path, map_location=device))
        print("Loaded network weights from", weight_path)
    except FileNotFoundError:
        print("Did not find weight files.")
        #sys.exit(2)

    UNet.to(device)
    UNet.eval()
    # matlab grayscale weights: 0.2989 * R + 0.5870 * G + 0.1140 * B
    gray = torch.tensor([0.2989, 0.5870, 0.1140])[:, None, None].float()
    with torch.no_grad():
        for i, batch in enumerate(trainloader):
            if dataset == 0:  #cifar 10 yields (image, label) tuples
                (image, _) = batch
            elif dataset in (1, 2):  #places / stl yield bare images
                image = batch
            X = None
            if lab:
                if dataset == 0:  #cifar 10 comes as RGB; convert to Lab here
                    image = np.transpose(image, (0, 2, 3, 1))
                    image = np.transpose(color.rgb2lab(image), (0, 3, 1, 2))
                    # center L channel around 0 (L in [0,100] -> [-50,50])
                    image = torch.from_numpy(
                        (image -
                         np.array([50, 0, 0])[None, :, None, None])).float()
                X = torch.unsqueeze(image[:, 0, :, :], 1).to(
                    device)  #set X to the Lightness of the image
                image = image[:, 1:, :, :]  #image is a and b channel
            else:
                #convert to grayscale image and load data to gpu
                X = (image.clone() * gray).sum(1).to(device).view(
                    -1, 1, *in_shape[1:])
                image = image.float()
            # Fix: was a bare 'except:' which also swallowed KeyboardInterrupt;
            # the fallback feeds a 3-channel replica for models expecting RGB input.
            try:
                unet_col = UNet(X)
            except Exception:
                unet_col = UNet(torch.stack((X, X, X), 1)[:, :, 0, :, :])
            show_colorization(unet_col,
                              image,
                              X,
                              lab=lab,
                              cl=classification,
                              zoom=zoom,
                              T=temp)
Exemple #12
0
logger.addHandler(rf_handler)

# Session boundary timestamps for today — presumably the Chinese A-share
# trading day (morning 09:30:30-11:30:00, afternoon 13:00:00-15:00:00);
# confirm against the exchange the strategy targets.
amBegin = dt.datetime.combine(dt.datetime.now().date(),
                              dt.time(hour=9, minute=30, second=30))
amEnd = dt.datetime.combine(dt.datetime.now().date(),
                            dt.time(hour=11, minute=30, second=0))
pmBegin = dt.datetime.combine(dt.datetime.now().date(),
                              dt.time(hour=13, minute=0, second=0))
pmEnd = dt.datetime.combine(dt.datetime.now().date(),
                            dt.time(hour=15, minute=0, second=0))
# earliest time of day at which buying is allowed
buyBegin = dt.datetime.combine(dt.datetime.now().date(),
                               dt.time(hour=9, minute=44, second=25))

oneMinuteOpenTime = 0
allStocksOpen = None
# NOTE(review): rebinds 'model' from the imported module to a model instance,
# shadowing the module name for the remainder of the script.
model = model.model(log=logger)
dfcf = dfcf_data.IndustryData()
cyb_b = leastsq.cyb_result()
sz50_k, sz50_b = leastsq.sz50_result()
past_config = configparser.ConfigParser()  # note: keys are case-sensitive
past_config.read("config\\past.info")  # config file path (Windows-style separator)
keep_alive_time = 0
while True:
    try:
        now = dt.datetime.now()
        judgd_factors = factors.judge_industry(dfcf)
        mean = judgd_factors.get_six_industry()
        a50_f = judgd_factors.sz50_factor()
        cyb_f = judgd_factors.cyb_factor()
        cyb_real = judgd_factors.cyb_real()
Exemple #13
0
def main():
    """Train and evaluate a sentiment classifier on the SST-5 dataset.

    Command-line flags select the architecture (--model), optimizer
    (--optim), learning rate (--lr), batch size and epoch budget.  The
    model with the lowest validation loss is checkpointed, final test
    metrics are printed, and loss/accuracy curves are saved and plotted.
    """
    parser = argparse.ArgumentParser(description="SST")
    parser.add_argument("--load", help="Load the saved model")
    parser.add_argument("--save-path",
                        default="model.pkl",
                        help="Path to save model")
    parser.add_argument("--model",
                        default="naive_lstm",
                        help="Choose the model")
    parser.add_argument("--max-epoch",
                        type=int,
                        default=200,
                        help="Restrict max epochs")
    parser.add_argument("--batch-size",
                        type=int,
                        default=8,
                        help="Training batch size")
    parser.add_argument(
        "--log-interval",
        type=int,
        default=10,
        help="Log interval when training in batches",
    )
    parser.add_argument("--save-interval",
                        type=int,
                        default=1,
                        help="Model save interval when training")
    parser.add_argument("--optim",
                        default="sgd",
                        help="Optimizer. Choose between adam and sgd")
    parser.add_argument("--lr", type=float, default=0.1, help="Learning rate")

    args = parser.parse_args()

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    ################################
    # DataLoader
    ################################

    # set up fields
    TEXT = data.Field()
    LABEL = data.Field(sequential=False, dtype=torch.long)

    # make splits for data
    # DO NOT MODIFY: fine_grained=True, train_subtrees=False
    train, val, test = datasets.SST.splits(TEXT,
                                           LABEL,
                                           fine_grained=True,
                                           train_subtrees=False)
    # build the vocabulary
    # you can use other pretrained vectors, refer to https://github.com/pytorch/text/blob/master/torchtext/vocab.py
    TEXT.build_vocab(train, vectors=Vectors(name="vector.txt", cache="./data"))
    LABEL.build_vocab(train)
    # make iterators for the splits (bucketed by length to reduce padding)
    train_iter, val_iter, test_iter = data.BucketIterator.splits(
        (train, val, test), batch_size=args.batch_size, device=device)

    # Copy the pre-trained word embeddings into the model's embedding layer.
    pretrained_embeddings = TEXT.vocab.vectors
    print("pretrained_embeddings.shape: ", pretrained_embeddings.shape)
    INPUT_DIM = len(TEXT.vocab)
    EMBEDDING_DIM = pretrained_embeddings.shape[1]
    HIDDEN_DIM = 768
    OUTPUT_DIM = 5  # SST-5 fine-grained sentiment classes

    model = md.model(args.model)(INPUT_DIM, EMBEDDING_DIM, HIDDEN_DIM,
                                 OUTPUT_DIM)
    # the network is expected to expose an nn.Embedding layer named 'embedding'
    model.embedding.weight.data.copy_(pretrained_embeddings)
    if args.load:
        # Fix: --load was parsed but ignored; resume from the given checkpoint.
        model.load_state_dict(torch.load(args.load, map_location=device))
    print(
        f"The model has {solver.count_parameters(model):,} trainable parameters"
    )

    # Training setup.
    # Fix: the original hard-coded SGD with lr=0.1, silently ignoring the
    # --optim and --lr flags (and misspelled the variable as 'opitmizer');
    # honor both flags here.  Defaults keep the original behavior.
    if args.optim.lower() == "adam":
        optimizer = optim.Adam(model.parameters(), lr=args.lr)
    else:
        optimizer = optim.SGD(model.parameters(), lr=args.lr)
    criterion = nn.CrossEntropyLoss()

    model = model.to(device)
    criterion = criterion.to(device)

    # Fix: torch.save below fails if the checkpoint directory is missing.
    import os
    os.makedirs("./trained_models", exist_ok=True)

    num_of_epochs = args.max_epoch
    best_valid_loss = float("inf")
    train_loss_list, train_acc_list = [], []
    val_loss_list, val_acc_list = [], []
    for epoch in range(num_of_epochs):
        start_time = time.time()
        train_loss, train_acc = solver.train_one_epoch(model, train_iter,
                                                       optimizer, criterion)
        valid_loss, valid_acc = solver.evaluate(model, val_iter, criterion)
        train_loss_list.append(train_loss)
        train_acc_list.append(train_acc)
        val_loss_list.append(valid_loss)
        val_acc_list.append(valid_acc)
        end_time = time.time()
        epoch_mins, epoch_secs = solver.epoch_time(start_time, end_time)

        # checkpoint the best model seen so far (by validation loss)
        if valid_loss < best_valid_loss:
            best_valid_loss = valid_loss
            torch.save(model.state_dict(),
                       "./trained_models/" + args.model + "-model.pt")

        print(f"Epoch: {epoch+1:02} | Epoch Time: {epoch_mins}m {epoch_secs}s")
        print(
            f"\tTrain Loss: {train_loss:.3f} | Train Acc: {train_acc*100:.2f}%"
        )
        print(
            f"\t Val. Loss: {valid_loss:.3f} |  Val. Acc: {valid_acc*100:.2f}%"
        )

    # Test
    test_loss, test_acc = solver.evaluate(model, test_iter, criterion)
    print(f"Test Loss: {test_loss:.3f} | Test Acc: {test_acc*100:.2f}%")

    # Save Result
    result_dict = {
        args.model + " Training": [train_loss_list, train_acc_list],
        args.model + " Validating": [val_loss_list, val_acc_list],
    }

    save_result(result_dict, args.model + "_result")
    plot.plot_loss_and_acc_save(result_dict, "./plots/" + args.model)