Example #1
def test(station, test_dataset, max_values, type):
    if type == 0:
        feature_dim = 6
    else:
        feature_dim = 3
    model = BaseModel(feature_dim, use_length, rnn_hid_dim)
    model.load_state_dict(torch.load('./files/params_' + station + '.pkl'))
    for start in range(0, len(test_dataset) - predict_time):
        x, t = test_dataset[start]
        acc = []
        PM25 = []
        PM10 = []
        O3 = []
        PM25_actual = []
        PM10_actual = []
        O3_actual = []
        for i in range(0, predict_time):
            _, t = test_dataset[start + i]
            x = Variable(x, volatile=True)
            x = x.unsqueeze(0)
            output = model(x)
            output = output.squeeze(0)
            output = output.data
            x = x.squeeze(0)
            x = np.vstack((x[1:], output[-1]))
            x = torch.from_numpy(x)
            out = output[-1].numpy()
            tar = t[-1].numpy()
            out[:3] = np.clip(out[:3], 0, None)  # clamp negative pollutant predictions to zero
            out = np.multiply(out, max_values)
            tar = np.multiply(tar, max_values)
            PM25.append(out[0])
            PM10.append(out[1])
            if type == 0:
                O3.append(out[2])
            else:
                O3.append(0)
            PM25_actual.append(tar[0])
            PM10_actual.append(tar[1])
            O3_actual.append(tar[2])
        acc.append(smape(PM25, PM25_actual))
        acc.append(smape(PM10, PM10_actual))
        acc.append(smape(O3, O3_actual))
        print(acc)
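
The smape helper called above is not shown in this example; a minimal sketch under the standard SMAPE formula (an assumption, not this repository's code) could be:

import numpy as np

def smape(pred, actual):
    # symmetric mean absolute percentage error, treating 0/0 terms as 0
    pred = np.asarray(pred, dtype=float)
    actual = np.asarray(actual, dtype=float)
    denom = (np.abs(pred) + np.abs(actual)) / 2.0
    diff = np.abs(pred - actual)
    return np.mean(np.where(denom == 0, 0.0, diff / np.where(denom == 0, 1.0, denom)))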
Example #2
def predict(station, test_data, max_values, type):
    if type == 0:
        feature_dim = 6
    else:
        feature_dim = 3
    model = BaseModel(feature_dim, use_length, rnn_hid_dim)
    model.load_state_dict(torch.load('./files/params_' + station + '.pkl'))
    x = test_data
    #x, t = x.unsqueeze(0), t.unsqueeze(0)
    PM25 = []
    PM10 = []
    O3 = []
    for i in range(0, predict_time):
        x = Variable(x, volatile=True)
        x = x.unsqueeze(0)
        output = model(x)
        output = output.squeeze(0)
        output = output.data
        x = x.squeeze(0)
        x = np.vstack((x[1:], output[-1]))
        x = torch.from_numpy(x)
        out = output[-1].numpy()
        out = np.multiply(out, max_values)
        out[:3] = np.clip(out[:3], 0, None)  # clamp negative pollutant predictions to zero
        PM25.append(out[0])
        PM10.append(out[1])
        if type == 0:
            O3.append(out[2])
        else:
            O3.append(0)
    output_dict = {}
    output_dict['station_id'] = station
    output_dict['PM2.5'] = PM25
    output_dict['PM10'] = PM10
    output_dict['O3'] = O3
    if type == 0:
        beijing_output.append(output_dict)
    else:
        london_output.append(output_dict)
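
Both test and predict advance the model autoregressively: each step drops the oldest timestep from the input window and appends the newest prediction. Isolated from the loop, that update (a sketch, assuming x has shape (use_length, feature_dim) on CPU) is:

import numpy as np
import torch

def roll_window(x, last_pred):
    # drop the oldest timestep, append the latest prediction as the newest row
    window = np.vstack((x.numpy()[1:], last_pred.numpy()))
    return torch.from_numpy(window)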
Example #3
def demo_test():
    feature_dim = 6
    model = BaseModel(feature_dim, use_length, rnn_hid_dim)
    model.load_state_dict(torch.load('params.pkl'))
    train_dataset, valid_dataset, max_values = make_train_valid_dataset(
        'bj_aq.npy', use_length)
    # sample
    x, t = valid_dataset[0]
    #x = Variable(x, volatile=True).cuda()
    #t = Variable(t, volatile=True).cuda()
    x = Variable(x, volatile=True)
    t = Variable(t, volatile=True)
    x, t = x.unsqueeze(0), t.unsqueeze(0)
    output = model(x)
    output = output.squeeze(0)
    output = output.data
    x, t = x.squeeze(0), t.squeeze(0)
    current = np.multiply(x.data, max_values)  # x and t are still Variables; take .data before numpy math
    output = np.multiply(output, max_values)
    target = np.multiply(t.data, max_values)

    print(current)
    print(target)
    print(output)
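
Variable(..., volatile=True) is the pre-0.4 PyTorch idiom for inference without gradient tracking; on current PyTorch the equivalent pattern (a sketch, not the original code) is:

import torch

with torch.no_grad():  # replaces Variable(..., volatile=True)
    x, t = valid_dataset[0]
    output = model(x.unsqueeze(0)).squeeze(0)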
Example #4
    test_loader = make_data_loader(test_dataset,
                                   args.batch_size,
                                   args.batch_first,
                                   shuffle=False)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    args.device = device

    # instantiate model
    model = BaseModel(input_size,
                      output_size,
                      embedding_dim,
                      hidden_dim,
                      num_layers,
                      batch_first=args.batch_first)
    model.load_state_dict(torch.load('model.pt'))
    model = model.to(device)

    print(test_dataset.labels_vocab.itow)
    target_names = [w for i, w in test_dataset.labels_vocab.itow.items()]
    # Test The Model
    pred, true = test(args, test_loader, model)

    accuracy = (true == pred).sum() / len(pred)
    print("Test Accuracy : {:.5f}".format(accuracy))

    ## Save result
    strFormat = '%10s%10s\n'

    with open('result.txt', 'w') as f:
        f.write('Test Accuracy : {:.5f}\n'.format(accuracy))
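
target_names is built above but never used in the fragment; a plausible continuation (purely an assumption) is a per-class breakdown via scikit-learn:

from sklearn.metrics import classification_report

print(classification_report(true, pred, target_names=target_names))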
Example #5
train_set = CACDDataset("./data/CACD2000_train.hdf5", val_transform,
                        inv_normalize)
val_set = CACDDataset("./data/CACD2000_val.hdf5", val_transform, inv_normalize)

train_dataloader = DataLoader(train_set,
                              batch_size=BATCH_SIZE,
                              num_workers=4,
                              shuffle=True)
val_dataloader = DataLoader(val_set,
                            batch_size=BATCH_SIZE,
                            num_workers=4,
                            shuffle=False)

base_model = BaseModel(IF_PRETRAINED=True)
base_model.to(device)
base_model.load_state_dict(torch.load(MODEL_LOAD_PATH)['model'])
base_model.eval()

# ------------------------- Loss loading --------------------------------
camera_distance = 2.732
elevation = 0
azimuth = 0

renderer = sr.SoftRenderer(image_size=250,
                           sigma_val=1e-4,
                           aggr_func_rgb='hard',
                           camera_mode='look_at',
                           viewing_angle=30,
                           fill_back=False,
                           perspective=True,
                           light_intensity_ambient=1.0,
Example #6
tokens = [[CLS] + [processor.piece_to_id(subword)
                   for subword in sentence] + [SEP] for sentence in sentences]
sentences = [['<cls>'] + sentence + ['<sep>'] for sentence in sentences]
tokens = pad_sequence(
    [torch.Tensor(sentence).to(torch.long) for sentence in tokens],
    padding_value=PAD)

pretrained_model_name = 'pretrained_final.pth'

# You can use a model which has been pretrained over 200 epochs by TA
# If you use this saved model, you should mention it in the report
#
# pretrained_model_name = 'pretrained_byTA.pth'

model = BaseModel(token_num=len(processor))
model.load_state_dict(torch.load(pretrained_model_name, map_location='cpu'),
                      strict=False)

model.eval()

output = model(tokens)
output = pack_padded_sequence(output, (output[..., 0] != PAD).sum(0),
                              enforce_sorted=False)
temp = (output.data - output.data.mean(dim=0))
covariance = 1.0 / len(output.data) * temp.T @ temp
U, S, V = covariance.svd()
output = PackedSequence_(temp @ U[:, :7], output.batch_sizes,
                         output.sorted_indices, output.unsorted_indices)
output, _ = pad_packed_sequence(output, batch_first=True, padding_value=PAD)

# assumes: import matplotlib.pyplot as plt; from itertools import chain
_, ax = plt.subplots(nrows=2, ncols=len(sentences) // 2)
ax = list(chain.from_iterable(ax))
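
The block above projects the model's token features onto their top 7 principal components via an SVD of the covariance matrix (PackedSequence_ is assumed to alias torch.nn.utils.rnn.PackedSequence). Without the packing plumbing, the projection amounts to:

import torch

def pca_project(features, k=7):
    # features: (n_tokens, dim) -> projection onto the top-k principal components
    centered = features - features.mean(dim=0)
    cov = centered.T @ centered / features.shape[0]
    U, S, V = cov.svd()
    return centered @ U[:, :k]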
Example #7
    # prepare GPU
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # prepare writer
    os.makedirs(args.save_dir, exist_ok=True)
    writer = SummaryWriter(log_dir=args.save_dir)
    
    # prepare model
    network = BaseModel().to(device)

    # load ckpt
    best_acc = 0
    if len(args.load_ckpt) > 0:
        ckpt = torch.load(args.load_ckpt)
        network.load_state_dict(ckpt['model'])
        best_acc = ckpt['best_acc']
        print('Loaded ckpt {}, best Acc: {}'.format(args.load_ckpt, best_acc))

    if args.train:
        # prepare dataloader
        train_loader = DataLoader(dataset=CustomData('train', dir_path=args.data_dir),
                                  batch_size=args.batch_size,
                                  num_workers=args.num_workers,
                                  shuffle=True)

        val_loader = DataLoader(dataset=CustomData('val', dir_path=args.data_dir),
                                batch_size=args.batch_size,
                                num_workers=args.num_workers,
                                shuffle=False)
        # prepare optimizer
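
The snippet breaks off at the optimizer; a generic continuation (an assumption, with a hypothetical args.lr) might be:

        optimizer = torch.optim.Adam(network.parameters(), lr=args.lr)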