Beispiel #1
0
def rand_samples(batch_size, params, rand_type='uniform'):
    """Draw random background (x, y, date) features, each in [-1, 1].

    With ``rand_type == 'spherical'`` the first two coordinates are remapped
    so the point is uniform on the sphere rather than on the square; the
    date coordinate is left untouched. The result is passed through
    ``ut.encode_loc_time`` before being returned.
    """
    # Uniform samples in [-1, 1] for each of the three coordinates.
    samples = torch.rand(batch_size, 3).to(params['device']) * 2 - 1

    if rand_type == 'spherical':
        # Reinterpret coordinate 1 as an angle in [0, 2*pi) and coordinate 0
        # as the z-height; project back to a uniform point on the sphere.
        angle = ((samples[:, 1].unsqueeze(1) + 1) / 2.0) * (2 * math.pi)
        radius = torch.sqrt(1.0 - samples[:, 0].unsqueeze(1) ** 2)
        lon = radius * torch.cos(angle)
        lat = radius * torch.sin(angle)
        samples = torch.cat((lon, lat, samples[:, 2].unsqueeze(1)), 1)

    return ut.encode_loc_time(samples[:, :2],
                              samples[:, 2],
                              concat_dim=1,
                              params=params)
Beispiel #2
0
def rand_samples(batch_size, params, rand_type='uniform'):
    '''
    Randomly sample background locations, generate (lon, lat, date) and feed
    them through the appropriate location encoder for params['spa_enc_type'].

    For "geo_net" the generated (lon, lat) stay in [-1, 1]; for the encoders
    listed by ut.get_spa_enc_list() they are rescaled to real-world degrees
    (lon in [-180, 180], lat in [-90, 90]).

    Return:
        rand_feats: shape (batch_size, input_feat_dim)
    '''
    spa_enc_type = params['spa_enc_type']

    # Uniform (x, y, date) samples, each coordinate in [-1, 1].
    feats = torch.rand(batch_size, 3).to(params['device']) * 2 - 1

    if rand_type == 'spherical':
        # angle in [0, 2*pi), derived from the second coordinate
        angle = ((feats[:, 1].unsqueeze(1) + 1) / 2.0) * (2 * math.pi)
        radius = torch.sqrt(1.0 - feats[:, 0].unsqueeze(1) ** 2)
        # feats: (batch_size, 3) with (lon, lat) uniform on the sphere
        feats = torch.cat((radius * torch.cos(angle),
                           radius * torch.sin(angle),
                           feats[:, 2].unsqueeze(1)), 1)

    if spa_enc_type == "geo_net":
        return ut.encode_loc_time(feats[:, :2],
                                  feats[:, 2],
                                  concat_dim=1,
                                  params=params)

    if spa_enc_type in ut.get_spa_enc_list():
        # Rescale to degrees; rand_feats has shape (batch_size, 2).
        lon = torch.unsqueeze(feats[:, 0] * 180, dim=1)
        lat = torch.unsqueeze(feats[:, 1] * 90, dim=1)
        return torch.cat((lon, lat), 1).to(params["device"])

    raise Exception("spa_enc not defined!!!")
Beispiel #3
0
def rand_samples(batch_size, params, rand_type='uniform'):
    """Randomly sample background (lon, lat, date) points and encode them.

    Args:
        batch_size: number of background points to draw.
        params: dict containing at least 'device'; also consumed by
            ut.encode_loc_time.
        rand_type: 'uniform' draws all three coordinates uniformly in
            [-1, 1]; 'spherical' draws (lon, lat) uniformly on the sphere
            (mapped back to [-1, 1] each) with the date uniform in [-1, 1].

    Returns:
        Encoded features from ut.encode_loc_time, shape (batch_size, feat_dim).

    Raises:
        ValueError: if rand_type is not 'uniform' or 'spherical'.
    """
    if rand_type == 'spherical':
        rand_feats = torch.rand(batch_size, 3).to(params['device'])
        # make dates between -1 and 1
        rand_feats[:, 2] = rand_feats[:, 2] * 2.0 - 1.0
        # Standard uniform-on-sphere sampling via two uniform variates.
        theta1 = 2.0 * math.pi * rand_feats[:, 0]
        theta2 = torch.acos(2.0 * rand_feats[:, 1] - 1.0)
        lat = 1.0 - 2.0 * theta2 / math.pi
        lon = (theta1 / math.pi) - 1.0
        rand_feats = torch.cat((lon.unsqueeze(1), lat.unsqueeze(1),
                                rand_feats[:, 2].unsqueeze(1)), 1)
    elif rand_type == 'uniform':
        rand_feats = torch.rand(batch_size, 3).to(params['device']) * 2.0 - 1.0
    else:
        # Previously an unknown rand_type fell through and crashed later
        # with an unrelated NameError; fail fast with a clear message.
        raise ValueError("unknown rand_type: {!r}".format(rand_type))

    rand_feats = ut.encode_loc_time(rand_feats[:, :2],
                                    rand_feats[:, 2],
                                    concat_dim=1,
                                    params=params)
    return rand_feats
Beispiel #4
0
    #
    # neural network spatio-temporal prior
    #
    if 'geo_net' in eval_params['algs']:
        print('\nNeural net prior')
        print(' Model :\t' + os.path.basename(nn_model_path))
        net_params = torch.load(nn_model_path)
        params = net_params['params']

        # construct features
        val_locs_scaled = ut.convert_loc_to_tensor(val_locs)
        val_dates_scaled = torch.from_numpy(
            val_dates.astype(np.float32) * 2 - 1)
        val_feats_net = ut.encode_loc_time(val_locs_scaled,
                                           val_dates_scaled,
                                           concat_dim=1,
                                           params=params)

        model = models.FCNet(params['num_feats'], params['num_classes'],
                             params['num_filts'], params['num_users'])
        model.load_state_dict(net_params['state_dict'])
        model.eval()
        pred_geo_net = compute_acc(val_preds,
                                   val_classes,
                                   val_split,
                                   val_feats=val_feats_net,
                                   prior_type='geo_net',
                                   prior=model)

    #
    # Tang et al ICCV 2015, Improving Image Classification with Location Context
Beispiel #5
0
def generate_feats(locs, dates, params):
    """Convert raw locations and dates into encoded model input features."""
    loc_tensor = ut.convert_loc_to_tensor(locs, params['device'])
    # Rescale dates via d*2 - 1 (presumably mapping [0, 1] to [-1, 1] —
    # TODO confirm the incoming date range against the caller).
    date_tensor = torch.from_numpy(dates.astype(np.float32) * 2 - 1)
    date_tensor = date_tensor.to(params['device'])
    return ut.encode_loc_time(loc_tensor, date_tensor, concat_dim=1, params=params)