# transforms.RandomApply([transformx],p=0.15),
        # transforms.RandomChoice([
        # transforms.ColorJitter(brightness=(0.6,1.4)),
        # transforms.RandomRotation(degrees=(-10,10),resample=3),
        # transforms.RandomResizedCrop((112,112),scale=(0.9,1.0),ratio=(1.0,1.0),interpolation=3),
        # ]),
        transforms.ToTensor(),
        transforms.Normalize(mean = RGB_MEAN,
                             std = RGB_STD),
    ])

    dataset_train = datasets.ImageFolder(os.path.join(DATA_ROOT, train_name), train_transform)

    # create a weighted random sampler to process imbalanced data
    weights = make_weights_for_balanced_classes_old(dataset_train.imgs, len(dataset_train.classes))
    weights = torch.DoubleTensor(weights)
    sampler = torch.utils.data.sampler.WeightedRandomSampler(weights, len(weights))

    train_loader = torch.utils.data.DataLoader(
        dataset_train, batch_size = BATCH_SIZE, sampler = sampler, pin_memory = PIN_MEMORY,
        num_workers = NUM_WORKERS, drop_last = DROP_LAST
    )
    # train_loader = torch.utils.data.DataLoader(
    #     dataset_train, batch_size = BATCH_SIZE, shuffle= True, pin_memory = PIN_MEMORY,
    #     num_workers = NUM_WORKERS, drop_last = DROP_LAST
    # )

    NUM_CLASS = len(train_loader.dataset.classes)
    print("Number of Training Classes: {}".format(NUM_CLASS))

    # lfw, cfp_ff, cfp_fp, agedb, calfw, cplfw, vgg2_fp, lfw_issame, cfp_ff_issame, cfp_fp_issame, agedb_issame, calfw_issame, cplfw_issame, vgg2_fp_issame = get_val_data(DATA_ROOT)
示例#2
0
    def test(model, dataloader, dataset, sub_goal_indexes, best_of_n=20):
        """Evaluate best-of-N trajectory prediction; returns (ADE, FDE).

        For each batch, samples ``best_of_n`` sub-goal plans from ``model``,
        keeps the sampled plan closest to the ground truth, interpolates a
        full future trajectory from it, and accumulates average displacement
        error (ADE) and final displacement error (FDE) over ``dataset``.

        NOTE(review): relies on a module-level ``args`` (``data_scale``,
        ``future_length``) and on CUDA being available — confirm in context.
        """
        model.eval()

        total_dest_err = 0.
        total_overall_err = 0.

        for i, trajx in enumerate(dataloader):
            # First two channels of source/target are the (x, y) coordinates.
            x = trajx['src'][:, :, :2]
            y = trajx['trg'][:, :, :2]
            # Translate both so the last observed source point is the origin.
            x = x - trajx['src'][:, -1:, :2]
            y = y - trajx['src'][:, -1:, :2]

            x *= args.data_scale
            y *= args.data_scale
            x = x.double().cuda()
            y = y.double().cuda()

            # Ground truth is moved back to CPU numpy for error computation.
            y = y.cpu().numpy()

            # Flatten the observed trajectory to one feature vector per sample.
            x = x.view(-1, x.shape[1] * x.shape[2])

            # Ground-truth sub-goal plan, flattened to (batch, n_subgoals * 2).
            plan = y[:, sub_goal_indexes, :].reshape(y.shape[0], -1)
            all_plan_errs = []
            all_plans = []
            for _ in range(best_of_n):
                # dest_recon = model.forward(x, initial_pos, device=device)
                # modes = torch.tensor(k % args.ny, device=device).long().repeat(batch_size)
                plan_recon = model.forward(x, mask=None)
                plan_recon = plan_recon.detach().cpu().numpy()
                all_plans.append(plan_recon)
                plan_err = np.linalg.norm(plan_recon - plan, axis=-1)
                all_plan_errs.append(plan_err)

            all_plan_errs = np.array(all_plan_errs)
            all_plans = np.array(all_plans)
            # Per-sample index of the sampled plan with the smallest error.
            indices = np.argmin(all_plan_errs, axis=0)
            best_plan = all_plans[indices, np.arange(x.shape[0]), :]

            # FDE: distance between the final (x, y) of the best plan and of
            # the ground-truth plan (last two columns of the flat layout).
            best_dest_err = np.linalg.norm(best_plan[:, -2:] - plan[:, -2:],
                                           axis=1).sum()

            best_plan = torch.DoubleTensor(best_plan).cuda()
            interpolated_future = model.predict(x, best_plan)
            interpolated_future = interpolated_future.detach().cpu().numpy()

            # ADE: mean per-step displacement over the full predicted future.
            predicted_future = np.reshape(interpolated_future,
                                          (-1, args.future_length, 2))
            overall_err = np.linalg.norm(y - predicted_future,
                                         axis=-1).mean(axis=-1).sum()

            # Undo the input scaling before accumulating the raw-space errors.
            overall_err /= args.data_scale
            best_dest_err /= args.data_scale

            total_overall_err += overall_err
            total_dest_err += best_dest_err

        # Normalize the accumulated sums by the dataset size.
        total_overall_err /= len(dataset)
        total_dest_err /= len(dataset)

        return total_overall_err, total_dest_err
示例#3
0
def drosenbrock(tensor):
    """Analytic gradient of the Rosenbrock function at the point (x, y).

    Returns a 1-D DoubleTensor (d/dx, d/dy) of the classic banana function
    (1 - x)^2 + 100 (y - x^2)^2.
    """
    x, y = tensor
    # Shared subexpression of both partial derivatives.
    inner = y - x ** 2
    dx = -400 * x * inner - 2 * (1 - x)
    dy = 200 * inner
    return torch.DoubleTensor((dx, dy))
示例#4
0
def main():
    """PPO training driver that exchanges state/action with a GAMA simulation.

    Loads actor/critic weights if present, then repeatedly: receives a state
    from GAMA, updates the PPO agent, and sends back an acceleration command.

    NOTE(review): this block is corrupted — the line printing ``done`` /
    ``timepass`` contains a censored ``******`` span, and the loop/branch
    headers that the indented ``elif``/``else`` branches below belong to
    (e.g. ``while over != 1:`` and the first ``if``) are missing. It will
    not parse as-is; code kept byte-identical apart from comments.
    """
    
    ############## Hyperparameters ##############
    update_timestep = 1     #TD use == 1 # update policy every n timesteps  set for TD
    K_epochs = 2        # update policy for K epochs; NOTE: too large a lr can produce NaN?
    eps_clip = 0.2            
    gamma = 0.9           
    
    episode = 0
    training_stage = 65

    # Learning-rate schedule: lr steps down every `training_stage` episodes.
    sample_lr = [
        0.0001, 0.00009, 0.00008, 0.00007, 0.00006, 0.00005, 0.00004, 0.00003,
        0.00002, 0.00001, 0.000009, 0.000008, 0.000007, 0.000006, 0.000005,
        0.000004, 0.000003, 0.000002, 0.000001
    ]
    lr = 0.0001   #random_seed = None
    state_dim = 3
    action_dim = 1 
    #(self, state_dim, action_dim, lr, betas, gamma, K_epochs, eps_clip)
    actor_path = os.getcwd()+'/PPO_Mixedinput_Navigation_Model/weight/ppo_TD3lstm_actor.pkl'
    critic_path = os.getcwd()+'/PPO_Mixedinput_Navigation_Model/weight/ppo_TD3lstm_critic.pkl'
    ################ load ###################
    # Pick the scheduled lr for the current episode (clamped at the smallest).
    if episode >training_stage : #50 100
        try:
            lr = sample_lr[int(episode //training_stage)]
        except(IndexError):
            lr = 0.000001#0.000001*(0.99 ** ((episode-1000) // 10))

    ppo =  PPO(state_dim, action_dim, lr, gamma, K_epochs, eps_clip)
    # Resume from saved weights when they exist.
    if os.path.exists(actor_path):
        ppo.actor.load_state_dict(torch.load(actor_path))
        print('Actor Model loaded')
    if os.path.exists(critic_path):
        ppo.critic.load_state_dict(torch.load(critic_path))
        print('Critic Model loaded')
    print("Waiting for GAMA...")

    ################### initialization ########################
    save_curve_pic = os.getcwd()+'/PPO_Mixedinput_Navigation_Model/result/PPO_3LSTM_loss_curve.png'
    save_critic_loss = os.getcwd()+'/PPO_Mixedinput_Navigation_Model/training_data/PPO_TD3_critic_loss.csv'
    save_reward = os.getcwd()+'/PPO_Mixedinput_Navigation_Model/training_data/PPO_TD3_reward.csv'
    save_speed = os.path.abspath(os.curdir)+'/PPO_Mixedinput_Navigation_Model/training_data/AC_average_speed.csv'
    save_NPC_speed = os.path.abspath(os.curdir)+'/PPO_Mixedinput_Navigation_Model/training_data/NPC_speed.csv'
    reset()
    memory = Memory()

    advantages =0 #global value
    loss = []
    total_loss = []
    rewards = []
    total_rewards = []
    average_speed = []
    test = "GAMA"
    state,reward,done,time_pass,over ,average_speed_NPC = GAMA_connect(test) #connect
    #[real_speed/10, target_speed/10, elapsed_time_ratio, distance_left/100,distance_front_car/10,distance_behind_car/10,reward,done,over]
    # NOTE(review): corrupted line below — the ``******`` span swallowed the
    # main while-loop / first-branch headers for the code that follows.
    print("done:",done,"timepass:"******"state ",state)
            #print("reward",reward)
            average_speed.append(state[0])
            rewards.append(reward)
            memory.rewards.append(reward)
            memory.is_terminals.append(done)
            state = torch.DoubleTensor(state).reshape(1,state_dim).to(device) 
            state_img = generate_img() 
            tensor_cv = torch.from_numpy(np.transpose(state_img, (2, 0, 1))).double().to(device)
            # Maintain a 3-deep window of "next" states/images for the update.
            if  len(memory.states_next) ==0:
                for _ in range(3):
                    memory.states_next = memory.states
                    memory.states_next[2] = state
                    memory.states_img_next = memory.states_img
                    memory.states_img_next [2]= tensor_cv
            else:
                del memory.states_next[:1]
                del memory.states_img_next[:1]
                memory.states_next.append(state)
                memory.states_img_next.append(tensor_cv)
            loss_ = ppo.update(memory,lr,advantages,done)
            loss.append(loss_)
            del memory.logprobs[:]
            del memory.rewards[:]
            del memory.is_terminals[:]
            #memory.clear_memory()

            action = ppo.select_action(state,tensor_cv, memory)
            send_to_GAMA([[1,float(action*10)]])
            #print("acceleration ",float(action))

        # episode finished
        elif done == 1:
            average_speed.append(state[0])
            # send the action first, then do the update
            print("state_last",state)
            send_to_GAMA( [[1,0]] ) 
            rewards.append(reward) 

            del memory.states_next[:1]
            del memory.states_img_next[:1]
            state = torch.DoubleTensor(state).reshape(1,state_dim).to(device) # reshape into 1 row
            memory.states_next.append(state)
            state_img = generate_img() 
            tensor_cv = torch.from_numpy(np.transpose(state_img, (2, 0, 1))).double().to(device)
            memory.states_img_next.append(tensor_cv)

            memory.rewards.append(reward)
            memory.is_terminals.append(done)
            loss_ = ppo.update(memory,lr,advantages,done)
            loss.append(loss_)
            memory.clear_memory()

            print("----------------------------------Net_Trained---------------------------------------")
            print('--------------------------Iteration:',episode,'over--------------------------------')
            episode += 1
            loss_sum = sum(loss).cpu().detach().numpy()
            total_loss.append(loss_sum)
            total_reward = sum(rewards)
            total_rewards.append(total_reward)
            cross_loss_curve(loss_sum.squeeze(0),total_reward,save_curve_pic,save_critic_loss,save_reward,np.mean(average_speed)*10,save_speed,average_speed_NPC,save_NPC_speed)
            rewards = []
            loss = []
            # Step the learning rate down as training progresses.
            if episode >training_stage  : #50 100
                try:
                    lr = sample_lr[int(episode //training_stage)]
                except(IndexError):
                    lr = 0.000003#0.000001*(0.99 ** ((episode-1000) // 10))
            torch.save(ppo.actor.state_dict(),actor_path)
            torch.save(ppo.critic.state_dict(),critic_path)

        # the very first step (no update yet, just act)
        else:
            print('Iteration:',episode,"lr",lr)
            state = torch.DoubleTensor(state).reshape(1,state_dim).to(device) 
            state_img = generate_img() # numpy image: H x W x C (500, 500, 3) -> (3,500,500)
            tensor_cv = torch.from_numpy(np.transpose(state_img, (2, 0, 1))).double().to(device) # np.transpose( xxx,  (2, 0, 1)) torch image: C x H x W
            action = ppo.select_action(state, tensor_cv,memory)
            print("acceleration: ",action) 
            send_to_GAMA([[1,float(action*10)]])

        state,reward,done,time_pass,over ,average_speed_NPC= GAMA_connect(test)

    return None 
示例#5
0
def tensor(data, **kwargs):
    """Construct a 64-bit float tensor from *data*.

    Thin wrapper around ``torch.DoubleTensor``; unlike ``torch.tensor``,
    which defaults to float32, the result is always double precision.
    """
    result = torch.DoubleTensor(data, **kwargs)
    return result
示例#6
0
def greedy_matching():
    """Score all test entity pairs and sweep a similarity threshold.

    Builds model predictions for positive/negative test pairs (plus
    cosine-similarity fallbacks for "direct" inputs), then scans thresholds
    over the observed score range to maximize F1, recording precision /
    recall / F1 / F2 / F0.5 at the optimum into the module-level metrics.

    Returns an OrderedDict mapping (ent1, ent2) -> (score, ground_truth).

    NOTE(review): depends on many module-level globals (model, test data,
    emb_indexer_inv, gt_mappings, emb_vals, all_fn/all_fp, cos_sim,
    is_valid, test_onto, generate_input, device) — confirm in context.
    """
    global batch_size, test_data_t, test_data_f, model, optimizer, emb_indexer_inv, gt_mappings, all_metrics, direct_inputs, direct_targets
    all_results = OrderedDict()
    direct_inputs, direct_targets = [], []
    with torch.no_grad():
        all_pred = []

        np.random.shuffle(test_data_t)
        np.random.shuffle(test_data_f)

        # Positive (label 1) and negative (label 0) example batches.
        inputs_pos, targets_pos = generate_input(test_data_t, 1)
        inputs_neg, targets_neg = generate_input(test_data_f, 0)

        indices_pos = np.random.permutation(len(inputs_pos))
        indices_neg = np.random.permutation(len(inputs_neg))

        inputs_pos, targets_pos = inputs_pos[indices_pos], targets_pos[
            indices_pos]
        inputs_neg, targets_neg = inputs_neg[indices_neg], targets_neg[
            indices_neg]

        # Negatives are spread over the same number of batches as positives.
        batch_size = min(batch_size, len(inputs_pos))
        num_batches = int(ceil(len(inputs_pos) / batch_size))
        batch_size_f = int(ceil(len(inputs_neg) / num_batches))
        for batch_idx in range(num_batches):
            batch_start = batch_idx * batch_size
            batch_end = (batch_idx + 1) * batch_size

            batch_start_f = batch_idx * batch_size_f
            batch_end_f = (batch_idx + 1) * batch_size_f

            # Each batch mixes a positive slice and a negative slice.
            inputs = np.concatenate((inputs_pos[batch_start:batch_end],
                                     inputs_neg[batch_start_f:batch_end_f]))
            targets = np.concatenate((targets_pos[batch_start:batch_end],
                                      targets_neg[batch_start_f:batch_end_f]))

            inp = inputs.transpose(1, 0, 2)

            inp_elems = torch.LongTensor(inputs).to(device)
            targ_elems = torch.DoubleTensor(targets)

            outputs = model(inp_elems)
            outputs = [el.item() for el in outputs]
            targets = [True if el.item() else False for el in targets]

            # Record (score, label) per entity pair; duplicate pairs flag an error.
            for idx, pred_elem in enumerate(outputs):
                ent1 = emb_indexer_inv[inp[0][idx][0]]
                ent2 = emb_indexer_inv[inp[1][idx][0]]
                if (ent1, ent2) in all_results:
                    print("Error: ", ent1, ent2, "already present")
                all_results[(ent1, ent2)] = (pred_elem, targets[idx])

        direct_targets = [True if el else False for el in direct_targets]

        # "Direct" pairs bypass the model: scored by embedding cosine similarity.
        print("Len (direct inputs): ", len(direct_inputs))
        for idx, direct_input in enumerate(direct_inputs):
            ent1 = emb_indexer_inv[direct_input[0]]
            ent2 = emb_indexer_inv[direct_input[1]]
            sim = cos_sim(emb_vals[direct_input[0]], emb_vals[direct_input[1]])
            all_results[(ent1, ent2)] = (sim, direct_targets[idx])

        # Threshold sweep over the observed score range (padded by 0.02).
        optimum_metrics, opt_threshold = [-1000 for i in range(5)], -1000
        low_threshold = np.min([el[0] for el in all_results.values()]) - 0.02
        high_threshold = np.max([el[0] for el in all_results.values()]) + 0.02
        threshold = low_threshold
        step = 0.001
        opt_fn, opt_fp = [], []
        while threshold < high_threshold:
            # Pairs predicted positive at this threshold.
            res = []
            for i, key in enumerate(all_results):
                if all_results[key][0] > threshold:
                    res.append(key)
            fn_list = [(key, all_results[key][0]) for key in gt_mappings
                       if key not in set(res) and not is_valid(test_onto, key)]
            fp_list = [(elem, all_results[elem][0]) for elem in res
                       if not all_results[elem][1]]
            tp_list = [(elem, all_results[elem][0]) for elem in res
                       if all_results[elem][1]]

            tp, fn, fp = len(tp_list), len(fn_list), len(fp_list)
            exception = False

            try:
                precision = tp / (tp + fp)
                recall = tp / (tp + fn)
                f1score = 2 * precision * recall / (precision + recall)
                f2score = 5 * precision * recall / (4 * precision + recall)
                f0_5score = 1.25 * precision * recall / (0.25 * precision +
                                                         recall)
            except Exception as e:
                # Division by zero when a threshold yields no TPs/FPs/FNs:
                # skip this threshold and continue the sweep.
                print(e)
                exception = True
                step = 0.001
                threshold += step
                continue
            print("Threshold: ", threshold, precision, recall, f1score,
                  f2score, f0_5score)

            if f1score > optimum_metrics[2]:
                optimum_metrics = [
                    precision, recall, f1score, f2score, f0_5score
                ]
                opt_threshold = threshold
                opt_fn = fn_list
                opt_fp = fp_list

            # Use a finer step near the top of the score range.
            if threshold > 0.98 and not exception:
                step = 0.0001
            else:
                step = 0.001
            print(step, threshold, exception)
            threshold += step
        print(
            "Precision: {} Recall: {} F1-Score: {} F2-Score: {} F0.5-Score: {}"
            .format(*optimum_metrics))
        all_fn.extend(opt_fn)
        all_fp.extend(opt_fp)
        # -1000 sentinel means no threshold produced valid metrics.
        if optimum_metrics[2] != -1000:
            all_metrics.append((opt_threshold, optimum_metrics))
    return all_results
 def __init__(self, weights, num_samples, replacement=True):
     """Store weighted-sampling configuration.

     weights: per-item sampling weights (converted to a DoubleTensor).
     num_samples: number of samples to draw.
     replacement: draw with replacement when True (default).
     """
     self.weights = torch.DoubleTensor(weights)
     self.num_samples = num_samples
     self.replacement = replacement
    output_dim = 3
    sigma_dim = 2
    batch_size = 7
    BBTT_steps = 10
    input_dim = 5
    nnLayerSizes = [20, 20, 20]
    net_type = "LSTM"
    # net_type = "FFNN"

    input_ = torch.randn([batch_size, BBTT_steps + 1, input_dim])

    net = NET(net_type, BBTT_steps, input_dim, nnLayerSizes, output_dim,
              sigma_dim)
    print(net)

    output, sigma = net.forwardVector(input_)

    input_ = torch.DoubleTensor(input_)
    print("Input size KxTxD")
    print(input_.size())
    print("Output size KxN")
    print(output.size())
    print("Sigma size KxN")
    print(sigma.size())

    # if(PRINT_OUTPUT): print(dir(output))
    # if(PRINT_OUTPUT): print(output.__dict__)
    # if(PRINT_OUTPUT): print(output)
    # if(PRINT_OUTPUT): print(output.detach().numpy())
示例#9
0
    def getDataloader(self):
        """Build train/test/validation DataLoaders for the image dataset.

        Reads images from <grandparent-of-cwd>/Data/{Train,Validation,Test},
        applies albumentations transforms (flips + normalization for
        training, normalization only for test/val), and balances training
        classes with a WeightedRandomSampler.

        Returns (classes, trainDataLoader, testDataLoader, valDataLoader).

        NOTE(review): label CSV paths are built with a literal backslash
        separator, so this is Windows-only as written.
        """
        trainTransform = A.Compose([
            A.transforms.HorizontalFlip(p=0.5),
            A.transforms.VerticalFlip(p=0.5),
            A.transforms.Normalize(mean=(0.57, 0.94, 0.45),
                                   std=(0.15, 0.17, 0.10)),
            ToTensorV2()
        ])
        testTransform = A.Compose([
            A.transforms.Normalize(mean=(0.57, 0.94, 0.45),
                                   std=(0.15, 0.17, 0.10)),
        ])
        # Data lives two directories above the current working directory.
        working_path = Path(os.getcwd())
        working_path = working_path.parent.parent
        dataPath = Path.joinpath(working_path, 'Data')
        trainPath = Path.joinpath(dataPath, 'Train')
        valPath = Path.joinpath(dataPath, 'Validation')

        trainImgPath = Path.joinpath(trainPath, 'TrainImages')
        valImgPath = Path.joinpath(valPath, 'ValidationImages')
        testImgPath = Path.joinpath(dataPath, 'Test', 'TestImages')

        trainLabels = str(trainPath) + '\\trainLbls.csv'
        valLabels = str(valPath) + '\\valLbls.csv'

        # natsorted keeps files in natural (numeric) filename order.
        listTrainData = natsorted(
            [os.path.join(trainImgPath, f) for f in os.listdir(trainImgPath)])
        listTestData = natsorted(
            [os.path.join(testImgPath, f) for f in os.listdir(testImgPath)])
        listValData = natsorted(
            [os.path.join(valImgPath, f) for f in os.listdir(valImgPath)])

        # Test set has no labels (cvsFile=None); val/test share the transform.
        albuTrainData = ImageDataset(imageList=listTrainData,
                                     cvsFile=trainLabels,
                                     transform=trainTransform)
        albuTestData = ImageDataset(imageList=listTestData,
                                    cvsFile=None,
                                    transform=testTransform)
        albuValData = ImageDataset(imageList=listValData,
                                   cvsFile=valLabels,
                                   transform=testTransform)

        classes = pd.read_csv(trainLabels, header=None, names=['Labels'])
        classes = classes['Labels'].unique()

        # Per-sample weights so each class is drawn equally often
        # (labels are shifted to start at 0 before counting).
        balanced_weights = self.utils.create_weights_to_balance_classes(
            albuTrainData.lbls.Labels - 1, len(classes))
        balanced_weights = torch.DoubleTensor(balanced_weights)
        balanced_sampler = torch.utils.data.sampler.WeightedRandomSampler(
            balanced_weights, len(balanced_weights))

        trainDataLoader = torch.utils.data.DataLoader(
            albuTrainData,
            batch_size=self.batch_size,
            num_workers=self.num_workers,
            sampler=balanced_sampler,
            drop_last=True,
            pin_memory=True)
        testDataLoader = torch.utils.data.DataLoader(
            albuTestData,
            batch_size=self.batch_size,
            num_workers=self.num_workers,
            drop_last=True,
            pin_memory=True)

        valDataLoader = torch.utils.data.DataLoader(
            albuValData,
            batch_size=self.batch_size,
            num_workers=self.num_workers,
            drop_last=True,
            pin_memory=True)
        return classes, trainDataLoader, testDataLoader, valDataLoader
示例#10
0
import odl
from odl.contrib.pytorch import TorchOperator

# --- Forward --- #

# Define ODL operator: a 2x3 matrix acting as a linear map R^3 -> R^2.
matrix = np.array([[1, 2, 3], [4, 5, 6]], dtype=float)
odl_op = odl.MatrixOperator(matrix)

# Wrap as torch operator so it can participate in autograd graphs.
torch_op = TorchOperator(odl_op)

# Define evaluation point and wrap into a variable. Mark as
# `requires_gradient`, otherwise `backward()` doesn't do anything.
# This is supported by the ODL wrapper.
x = torch.DoubleTensor([1, 1, 1])
x_var = torch.autograd.Variable(x, requires_grad=True)

# Evaluate torch operator
res_var = torch_op(x_var)

# ODL result computed directly, for comparison with the torch path.
odl_res = odl_op(x.numpy())

print('pytorch result: ', res_var.data.numpy())
print('ODL result    : ', odl_res.asarray())

# --- Gradient (backward) --- #

# Define ODL cost functional (squared L2 norm on the operator's range).
odl_cost = odl.solvers.L2NormSquared(odl_op.range)
示例#11
0
        self.k = k
        self.DOF = DOF

    def get_f(self, x):
        """Cubic restoring force: f(x) = k * x**3."""
        return self.k * x ** 3


""" 主程序"""

if __name__ == "__main__":
    Nt = 2048
    Nh = 5
    Nf = 2

    K = torch.DoubleTensor([[3.0e4, -1.5e4], [-1.5e4, 1.5e4]])
    M = torch.DoubleTensor([[1, 0], [0, 1]])
    C = torch.DoubleTensor([[2, -1], [-1, 1]])

    model = Model(M, C, K, Nh)

    force_nl = Force_gap(DOF=(0, ), g=0.0001, k=1e7)
    # force_nl = Force_cubic(DOF=(0, ), k=5e9)
    force_ex = Force_EX(DOF=(0, ), Nh=Nh, Nf=Nf)
    aft = AFT.AFT(Nh=Nh, Nt=Nt, Nf=Nf, force=force_nl)

    w1 = (50 + 0) * 2 * np.pi
    w2 = (50 + 1) * 2 * np.pi

    N_solver = Solver.Newton_Solver(model=model,
                                    force_ex=force_ex,
def inner_objective(f, seed):
    """MSE between ``f_target`` and the first output column of ``f``.

    Seeds torch for reproducibility, samples a 500x2 batch uniformly from
    [-5, 5], and returns the mean-squared error between the target values
    and f's first output column on that batch.
    """
    torch.manual_seed(seed)
    n_samples = 500
    x = torch.DoubleTensor(n_samples, 2).uniform_(-5, 5)
    prediction = f(x)[:, 0]
    target = f_target(x)
    return torch.nn.MSELoss()(target, prediction)
示例#13
0
                beta2,
                group["lr"],
                group["weight_decay"],
                group["eps"],
            )
        return loss


if __name__ == "__main__":

    def rosenbrock(tensor):
        """Rosenbrock banana function evaluated at (x, y)."""
        x, y = tensor
        return (1 - x)**2 + 100 * (y - x**2)**2

    def drosenbrock(tensor):
        """Analytic gradient of the Rosenbrock function at (x, y)."""
        x, y = tensor
        return torch.DoubleTensor(
            (-400 * x * (y - x**2) - 2 * (1 - x), 200 * (y - x**2)))

    # Single optimization step of GradClipAdam on the Rosenbrock function.
    # NOTE(review): `params` is created without requires_grad=True, so
    # backward() would not populate params.grad here — confirm whether
    # GradClipAdam (project code) handles this, or whether this demo is stale.
    params = torch.DoubleTensor((1.5, 1.5))
    gradclipadam = GradClipAdam(params, lr=1e-4)
    gradclipadam.zero_grad()

    y_hat = rosenbrock(params)

    y_hat.backward()
    gradclipadam.step()
    # for i in range(100):
    #     algorithm(lambda x: (rosenbrock(x), drosenbrock(x)), params, config)
    #     print("{:.8f}\t{:.8f}\t".format(params[0], params[1]))
示例#14
0
# This is a test among maxpooling, convolution and RRSVM

from torch.autograd import Variable
import torch
from torch.autograd import gradcheck
import numpy as np
import torch
import torch.nn.functional as F


if __name__ == '__main__':
    kernel_size = 2
    n_channel = 100
    feature_size = 6
    batch_size = 3
    # Double-precision input is required for gradcheck's finite-difference
    # comparison; `inputs` avoids shadowing the `input` builtin.
    inputs = (Variable(torch.DoubleTensor(torch.randn(1, n_channel, feature_size, feature_size).double()), requires_grad=True), kernel_size)
    f_max_pooling = F.max_pool2d
    # BUG FIX: Python 2 `print expr` statement -> Python 3 print() call.
    print(gradcheck(f_max_pooling, inputs, eps=1e-3))
示例#15
0
    model.eval()
    seq = test_input
    future = []

    with torch.no_grad():
        model.hidden_cell = (torch.zeros(1, 1, model.hidden_layer_size).double().to(device),
                        torch.zeros(1, 1, model.hidden_layer_size).double().to(device))
        model.hidden_cell_2 = (torch.zeros(1, 1, model.input_size).double().to(device),
                        torch.zeros(1, 1, model.input_size).double().to(device))
        for i in range(nfuture):
            y = model(seq.double().to(device)).detach().cpu().numpy()
            future.append(y[-1])
            seq = seq.detach().cpu().tolist()
            seq.append(y[-1])
            seq.pop(0)
            seq = torch.DoubleTensor(seq)
    
    y = np.array(future)
    x = [test_input_df[t_].max() + datetime.timedelta(hours=i) for i in range(len(future))]
    assert len(x)==len(y)    
    
    """
    Plotting
    """
    import plotly.graph_objects as go

    fig = go.Figure()
    fig.add_trace(go.Scatter(
        y=df[T_].to_numpy(),
        x=timestamp,
        mode='lines+markers',
示例#16
0
 def count_parameters(m, x, y):
     """Store module `m`'s total parameter count in m.total_params[0].

     x and y are unused (presumably the hook's input/output arguments —
     confirm against the profiling framework that registers this hook).
     The accumulated count ends up as a 1-element DoubleTensor.
     """
     total_params = 0
     for p in m.parameters():
         total_params += torch.DoubleTensor([p.numel()])
     m.total_params[0] = total_params
示例#17
0
        num_batches = int(ceil(len(inputs_pos) / batch_size))
        batch_size_f = int(ceil(len(inputs_neg) / num_batches))

        for batch_idx in range(num_batches):
            batch_start = batch_idx * batch_size
            batch_end = (batch_idx + 1) * batch_size

            batch_start_f = batch_idx * batch_size_f
            batch_end_f = (batch_idx + 1) * batch_size_f
            inputs = np.concatenate((inputs_pos[batch_start:batch_end],
                                     inputs_neg[batch_start_f:batch_end_f]))
            targets = np.concatenate((targets_pos[batch_start:batch_end],
                                      targets_neg[batch_start_f:batch_end_f]))

            inp_elems = torch.LongTensor(inputs).to(device)
            targ_elems = torch.DoubleTensor(targets).to(device)
            optimizer.zero_grad()
            outputs = model(inp_elems)
            loss = F.mse_loss(outputs, targ_elems)
            loss.backward()
            optimizer.step()

            if batch_idx % 500 == 0:
                print("Epoch: {} Idx: {} Loss: {}".format(
                    epoch, batch_idx, loss.item()))

    model.eval()
    print("Trained value of W: ", model.output.weight)
    torch.save(model.state_dict(), "/u/vivek98/attention.pt")

    test_data_t = [key for key in test_data if test_data[key]]
示例#18
0
from models.value_network import Value


print(gym.__version__)


use_gpu = torch.cuda.is_available()

# Double-precision tensor aliases; CUDA variants when a GPU is present.
# The global default tensor type is switched to double as well.
Tensor = torch.cuda.DoubleTensor if use_gpu else torch.DoubleTensor
LongTensor = torch.cuda.LongTensor if use_gpu else torch.LongTensor
if use_gpu:
    torch.set_default_tensor_type('torch.cuda.DoubleTensor')
    PI = torch.cuda.DoubleTensor([3.1415926])
else:
    torch.set_default_tensor_type('torch.DoubleTensor')
    PI = torch.DoubleTensor([3.1415926])

# --- Hyperparameters for the MuJoCo Humanoid PPO run ---
env_name = 'Humanoid-v2'
render = False
log_std = 0
gamma = 0.99  # discount factor
tau = 0.95    # presumably the GAE lambda — confirm against the update code
l2_reg = 1e-3
learning_rate = 0.0003
clip_epsilon = 0.2 # for PPO
seed = 1
min_batch_size = 10000
total_iterations = 1000
log_interval = 1
optim_epochs = 5
optim_batch_size = 64
示例#19
0
def conjugate_gradiant(A_grad, b, model, is_cuda, hessian=None):
    """Solve A x = b by conjugate gradients, tracking the best iterate.

    When `hessian` is given, A is that (diagonally damped) explicit matrix;
    otherwise A x is evaluated matrix-free via hessian_vector_product
    (defined elsewhere in this module). Returns (best_x, i): the iterate
    with the smallest true residual (moved to CPU) and the iteration count.
    """
    # Initial guess: all-ones vector with b's shape.
    x = torch.DoubleTensor(np.zeros((b.size(0), b.size(1))) + 1)
    b_norm = max(b.norm(), 1e-20)
    k = 0
    if hessian is not None:
        # Small diagonal damping (1e-4 I) regularizes the explicit Hessian.
        I = torch.DoubleTensor(np.identity(hessian.data.size(0)) * 1e-4)
        if is_cuda:
            I = I.cuda()
        hessian += I
    if is_cuda:
        x = x.cuda()
    # Initial residual r0 = b - A x0 and search direction d0 = r0.
    if hessian is not None:
        r_i = b - hessian @ x
    else:
        r_i = b - hessian_vector_product(A_grad, x, model, k)
    d_i = r_i.clone()
    tolerance = 1e-3
    i = 0
    no_update_i = 0
    max_iter = 5000
    print('i:{}, r norm:{}'.format(i, r_i.norm()))
    # Track the iterate whose undamped residual (k = 0) is smallest.
    min_r = (b - hessian_vector_product(A_grad, x, model, 0)).norm()
    best_x = x.clone()
    while min_r / b_norm > tolerance:
        # Standard CG update: alpha = (r.r) / (d . A d).
        rr_i = r_i.permute(1, 0) @ r_i
        if hessian is not None:
            A_d = hessian @ d_i
        else:
            A_d = hessian_vector_product(A_grad, d_i, model, k)
        alpha = rr_i / (d_i.permute(1, 0) @ A_d)
        x += alpha * d_i
        r_i -= alpha * A_d
        rr_i_new = r_i.permute(1, 0) @ r_i
        d_i = r_i + (rr_i_new / rr_i) * d_i
        i += 1
        no_update_i += 1
        # Only re-evaluate the true residual when the CG residual improves.
        if rr_i_new.sqrt() < min_r:
            r = b - hessian_vector_product(A_grad, x, model, 0)
            if r.norm() < min_r:
                min_r = r.norm()
                best_x = x.clone()
                no_update_i = 0
        if i % 100 == 0:
            print(
                'i:{}, r norm:{}, min r norm:{}, releative: {} same best: {}'.
                format(i, rr_i_new.sqrt(), min_r, min_r / b_norm, no_update_i))
        # Out of budget or stalled: either restart from best_x with damping
        # k and a larger budget, or give up.
        if i == max_iter or no_update_i == max_iter / 10:
            if min_r / b_norm > 1:
                if max_iter > 10000:
                    break
                max_iter += 1000
                k = b_norm * 4 * 1e-3
                x = best_x.clone()
                r_i = b - hessian_vector_product(A_grad, x, model, k)
                d_i = r_i.clone()
            else:
                break

    # Report the final true residual before returning.
    r = b - hessian_vector_product(A_grad, best_x, model, 0)
    print(r.norm())
    print(b_norm)
    best_x = best_x.cpu()
    return best_x, i
示例#20
0
def _number_format(tensor, min_sz=-1):
    """Choose a format string, scale and column width for printing a tensor.

    Analyzes the absolute values of a flattened double copy of `tensor`.
    Returns (format, scale, sz): a str.format template for one element, a
    power-of-ten divisor to apply before formatting, and the column width.
    Precision comes from the module-level PRINT_OPTS.precision.
    """
    min_sz = max(min_sz, 2)
    tensor = torch.DoubleTensor(tensor.nelement()).copy_(tensor).abs_()

    # Replace inf/-inf/nan with a representative finite value so the
    # min/max/log10 analysis below stays well-defined.
    pos_inf_mask = tensor.eq(float('inf'))
    neg_inf_mask = tensor.eq(float('-inf'))
    nan_mask = tensor.ne(tensor)
    invalid_value_mask = pos_inf_mask + neg_inf_mask + nan_mask
    if invalid_value_mask.all():
        example_value = 0
    else:
        example_value = tensor[invalid_value_mask.eq(0)][0]
    tensor[invalid_value_mask] = example_value
    if invalid_value_mask.any():
        # Widen to fit the literal 'inf'/'nan' strings.
        min_sz = max(min_sz, 3)

    # Integer mode: every finite element is a whole number.
    int_mode = True
    # TODO: use fmod?
    for value in tensor:
        if value != math.ceil(value):
            int_mode = False
            break

    # Decimal exponents (digit counts) of the smallest/largest magnitudes.
    exp_min = tensor.min()
    if exp_min != 0:
        exp_min = math.floor(math.log10(exp_min)) + 1
    else:
        exp_min = 1
    exp_max = tensor.max()
    if exp_max != 0:
        exp_max = math.floor(math.log10(exp_max)) + 1
    else:
        exp_max = 1

    scale = 1
    exp_max = int(exp_max)
    prec = PRINT_OPTS.precision
    if int_mode:
        if exp_max > prec + 1:
            # Too many digits for fixed-point: use scientific notation.
            format = '{{:11.{}e}}'.format(prec)
            sz = max(min_sz, 7 + prec)
        else:
            sz = max(min_sz, exp_max + 1)
            format = '{:' + str(sz) + '.0f}'
    else:
        if exp_max - exp_min > prec:
            # Wide dynamic range: scientific notation.
            sz = 7 + prec
            if abs(exp_max) > 99 or abs(exp_min) > 99:
                sz = sz + 1
            sz = max(min_sz, sz)
            format = '{{:{}.{}e}}'.format(sz, prec)
        else:
            if exp_max > prec + 1 or exp_max < 0:
                # Fixed-point on scaled values; scale is reported separately.
                sz = max(min_sz, 7)
                scale = math.pow(10, exp_max - 1)
            else:
                if exp_max == 0:
                    sz = 7
                else:
                    sz = exp_max + 6
                sz = max(min_sz, sz)
            format = '{{:{}.{}f}}'.format(sz, prec)
    return format, scale, sz
示例#21
0
 def reset(self):
     """Reset accumulated scores and targets to empty numpy arrays."""
     self.scores = torch.DoubleTensor(torch.DoubleStorage()).numpy()
     self.targets = torch.LongTensor(torch.LongStorage()).numpy()
示例#22
0
    def testGrad(self):
        """Gradient-descend an initial impulse so circle c2 reaches the target.

        Builds a small physics world (a target circle, a forced circle c1 and
        a free circle c2), runs the simulation, and iteratively adjusts the
        learned force by backpropagating the final c2-target distance.
        """
        def make_world(learned_force):
            # Fresh world per iteration: c1 carries the learnable external
            # force; both moving circles ignore collisions with the target.
            bodies = []
            joints = []

            target = Circle([500, 300], 30)
            bodies.append(target)

            c1 = Circle([250, 210], 30)
            bodies.append(c1)
            c1.add_force(ExternalForce(learned_force))
            c1.add_no_collision(target)

            c2 = Circle([400, 250], 30)
            bodies.append(c2)
            c2.add_no_collision(target)

            world = World(bodies, joints, dt=DT)
            return world, c2, target

        initial_force = torch.DoubleTensor([0, 3, 0])
        initial_force[2] = 0
        initial_force = Variable(initial_force, requires_grad=True)

        # Initial demo: the force is applied only during the first 0.1s.
        learned_force = lambda t: initial_force if t < 0.1 else ExternalForce.ZEROS
        # learned_force = gravity
        world, c, target = make_world(learned_force)
        # initial_state = world.save_state()
        # next_fric_coeff = Variable(torch.DoubleTensor([1e-7]), requires_grad=True)
        # c.fric_coeff = next_fric_coeff
        # initial_state = world.save_state()
        run_world(world, run_time=TIME, screen=None)

        learning_rate = 0.001
        max_iter = 100

        dist_hist = []
        last_dist = 1e10
        for i in range(max_iter):
            learned_force = lambda t: initial_force if t < 0.1 else ExternalForce.ZEROS

            world, c, target = make_world(learned_force)
            # world.load_state(initial_state)
            # world.reset_engine()
            # c = world.bodies[0]
            # c.fric_coeff = next_fric_coeff
            run_world(world, run_time=TIME, screen=None)

            # Loss: final distance between the free circle and the target.
            dist = (target.pos - c.pos).norm()
            dist.backward()
            grad = initial_force.grad.data
            # grad.clamp_(-10, 10)
            # Manual SGD step; rebuilding the Variable also resets its grad.
            initial_force = Variable(initial_force.data - learning_rate * grad,
                                     requires_grad=True)
            # grad = c.fric_coeff.grad.data
            # grad.clamp_(-10, 10)
            # temp = c.fric_coeff.data - learning_rate * grad
            # temp.clamp_(1e-7, 1)
            learning_rate /= 1.1
            # next_fric_coeff = Variable(temp, requires_grad=True)
            # print(next_fric_coeff)
            # Early stop when the loss change drops below 1e-5.
            if abs((last_dist - dist).data[0]) < 1e-5:
                break
            last_dist = dist
            dist_hist.append(dist)

        # Final run with the learned force.
        world = make_world(learned_force)[0]
        # c.fric_coeff = next_fric_coeff
        # world.load_state(initial_state)
        # world.reset_engine()
        run_world(world, run_time=TIME, screen=None, recorder=None)
        dist = (target.pos - c.pos).norm()
示例#23
0
def as_tensor(*args, **kwargs):
    """Convert each positional argument to a ``torch.DoubleTensor``.

    ``None`` arguments pass through unchanged. With a single argument the
    converted value itself is returned; with several, a tuple of them.

    Bug fix: the original ``len(r) > 1`` test made a zero-argument call
    raise ``IndexError`` on ``r[0]``; an empty tuple is returned instead.
    """
    r = tuple(
        torch.DoubleTensor(arg, **kwargs) if arg is not None else None
        for arg in args)
    return r if len(r) != 1 else r[0]
示例#24
0
    def testInference(self):
        """Gradient-based system identification: recover the unknown mass of
        the last chain link by matching simulated positions to a ground-truth
        rollout and running gradient descent on the mass.
        """
        def make_world(forces, mass):
            # Build a 10-link vertical chain of rectangles; the top link is
            # jointed at [300, 30] with a None second body (presumably an
            # anchor to the world — confirm against the Joint API), and only
            # the last link carries the unknown `mass`.
            bodies = []
            joints = []

            # make chain of rectangles
            r = Rect([300, 50], [20, 60])
            bodies.append(r)
            joints.append(Joint(r, None, [300, 30]))
            for i in range(1, 10):
                if i < 9:
                    r = Rect([300, 50 + 50 * i], [20, 60])
                else:
                    # Last link gets the mass parameter under optimization.
                    r = Rect([300, 50 + 50 * i], [20, 60], mass=mass)
                bodies.append(r)
                joints.append(Joint(bodies[-1], bodies[-2],
                                    [300, 25 + 50 * i]))
                # Adjacent links overlap at the joint, so disable their
                # mutual collision response.
                bodies[-1].add_no_collision(bodies[-2])
            bodies[-1].add_force(ExternalForce(gravity, multiplier=100))

            # make projectile
            m = 13
            c1 = Circle([50, 500], 20)
            bodies.append(c1)
            for f in forces:
                c1.add_force(ExternalForce(f, multiplier=100 * m))

            world = World(bodies, joints, dt=DT)
            # Returns the world and the last (unknown-mass) chain link.
            return world, r

        def positions_run_world(world,
                                dt=Params.DEFAULT_DT,
                                run_time=10,
                                screen=None,
                                recorder=None):
            # Step the simulation until world.t reaches run_time, recording
            # the concatenated body positions at every step (including the
            # initial state).  NOTE(review): the dt/screen/recorder
            # parameters are unused in this body.
            positions = [torch.cat([b.p for b in world.bodies])]

            while world.t < run_time:
                world.step()
                positions.append(torch.cat([b.p for b in world.bodies]))
            return positions

        MASS_EPS = 1e-7  # lower bound keeping the optimized mass positive
        forces = [hor_impulse]
        ground_truth_mass = Variable(torch.DoubleTensor([7]))
        world, c = make_world(forces, ground_truth_mass)

        # One rollout with the true mass provides the target trajectory.
        ground_truth_pos = positions_run_world(world,
                                               run_time=10,
                                               screen=None,
                                               recorder=None)
        ground_truth_pos = [p.data for p in ground_truth_pos]
        ground_truth_pos = Variable(torch.cat(ground_truth_pos))

        learning_rate = 0.01
        max_iter = 100

        # Start the mass estimate far from the true value of 7.0.
        next_mass = Variable(torch.DoubleTensor([1.3]), requires_grad=True)
        loss_hist = []
        mass_hist = [next_mass]
        last_dist = 1e10
        for i in range(max_iter):
            print(i, end='\r')
            # Rebuild the world each iteration with the current estimate.
            world, c = make_world(forces, next_mass)
            # world.load_state(initial_state)
            # world.reset_engine()
            positions = positions_run_world(world, run_time=10, screen=None)
            positions = torch.cat(positions)
            # Truncate in case this rollout recorded more steps than the
            # ground-truth one.
            positions = positions[:len(ground_truth_pos)]
            # temp_ground_truth_pos = ground_truth_pos[:len(positions)]

            loss = torch.nn.MSELoss()(positions, ground_truth_pos)
            loss.backward()
            grad = c.mass.grad.data
            # clip gradient
            grad = torch.max(torch.min(grad, torch.DoubleTensor([100])),
                             torch.DoubleTensor([-100]))
            # Manual SGD step, clamped below by MASS_EPS.
            temp = c.mass.data - learning_rate * grad
            temp = max(MASS_EPS, temp[0])
            # Re-wrap as a fresh leaf Variable so the next rollout gets a
            # clean gradient (legacy pre-0.4 torch autograd style).
            next_mass = Variable(torch.DoubleTensor([temp]),
                                 requires_grad=True)
            # learning_rate /= 1.1
            if abs((last_dist - loss).data[0]) < 1e-3:
                break  # converged: loss stopped improving
            last_dist = loss
            loss_hist.append(loss)
            mass_hist.append(next_mass)

        # The recovered mass must land within 0.1 of the ground truth.
        assert abs(next_mass.data[0] - ground_truth_mass.data[0]) <= 1e-1
示例#25
0
        def forward(self, x, dest=None, mask=None, iteration=1, y=None):
            """Encode the past trajectory and generate a destination.

            In training mode, samples a latent from both the learned prior
            (via Langevin dynamics on the EBM) and from the destination-
            conditioned encoder (reparameterization), and returns all terms
            needed for the losses; in eval mode only the prior latent is
            used and just the generated destination is returned.
            """

            ftraj = self.encoder_past(x)

            # Optional social/interaction pooling over the provided mask.
            if mask:
                for _ in range(self.nonlocal_pools):
                    ftraj = self.non_local_social_pooling(ftraj, mask)

            if self.training:
                # Use persistent chains (PCD) once the replay buffer is full.
                pcd = True if len(
                    self.replay_memory) == args.memory_size else False
                if pcd:
                    z_e_0 = self.replay_memory.sample(
                        n=ftraj.size(0)).clone().detach().cuda()
                else:
                    z_e_0 = sample_p_0(n=ftraj.size(0), nz=self.zdim)
                # Refine the initial latent by Langevin dynamics under the
                # energy model; log every 1000th iteration.
                z_e_k, _ = self.sample_langevin_prior_z(Variable(z_e_0),
                                                        ftraj,
                                                        pcd=pcd,
                                                        verbose=(iteration %
                                                                 1000 == 0))
                # Push the refined chains back into the replay buffer.
                for _z_e_k in z_e_k.clone().detach().cpu().split(1):
                    self.replay_memory.push(_z_e_k)
            else:
                z_e_0 = sample_p_0(n=ftraj.size(0), nz=self.zdim)
                z_e_k, _ = self.sample_langevin_prior_z(Variable(z_e_0),
                                                        ftraj,
                                                        pcd=False,
                                                        verbose=(iteration %
                                                                 1000 == 0),
                                                        y=y)
            z_e_k = z_e_k.double().cuda()

            if self.training:
                # Posterior latent: encode the ground-truth destination
                # jointly with the trajectory features, split into mean and
                # log-variance, then reparameterize.
                dest_features = self.encoder_dest(dest)
                features = torch.cat((ftraj, dest_features), dim=1)
                latent = self.encoder_latent(features)
                mu = latent[:, 0:self.zdim]
                logvar = latent[:, self.zdim:]

                var = logvar.mul(0.5).exp_()
                eps = torch.DoubleTensor(var.size()).normal_().cuda()
                z_g_k = eps.mul(var).add_(mu)  # reparameterization trick
                z_g_k = z_g_k.double().cuda()

            # Decode the destination from the posterior latent at train time,
            # from the prior latent at eval time.
            if self.training:
                decoder_input = torch.cat((ftraj, z_g_k), dim=1)
            else:
                decoder_input = torch.cat((ftraj, z_e_k), dim=1)
            generated_dest = self.decoder(decoder_input)

            if self.training:
                # Predict the future conditioned on the generated destination,
                # and compute the contrastive-divergence energy terms.
                generated_dest_features = self.encoder_dest(generated_dest)
                prediction_features = torch.cat(
                    (ftraj, generated_dest_features), dim=1)
                pred_future = self.predictor(prediction_features)

                en_pos = self.ebm(z_g_k, ftraj).mean()
                en_neg = self.ebm(z_e_k.detach().clone(), ftraj).mean()
                cd = en_pos - en_neg

                return generated_dest, mu, logvar, pred_future, cd, en_pos, en_neg, pcd

            return generated_dest
示例#26
0
"""

import torch

# Tensor-basics demo: shapes, dtypes, simple statistics, and reshaping.
a = torch.tensor([2,2,4])
b = torch.tensor([[1,2,3],[2,3,4],[3,4,5],[4,5,6]])

print(a)
print(b)
print(a.shape)
print(b.shape)
print(a.size())
print(b.size())

c = torch.FloatTensor([[1,2,3],[2,3,4],[3,4,5],[4,5,6]])
d = torch.DoubleTensor([[1,2,3],[2,3,4],[3,4,5],[4,5,6]])

print(c)
# BUG FIX: `print(c.type)` printed the bound method object (with a memory
# address), not the tensor type; calling it yields the type string, e.g.
# 'torch.FloatTensor'.
print(c.type())
print(d)
print(d.type())

print(c.mean())
print(d.mean())
print(c.std())
print(d.std())

print("Reshape")
print(b.view(-1,1))
print(b.view(12))
print(b.view(-1,4))
示例#27
0
# One ImageFolder dataset per split, rooted at the extracted archive fold.
image_datasets = {}
for split in ['train', 'val']:
    split_root = os.path.join(tar_extract_path, tar_name,
                              fold_lst[fold_num], split)
    image_datasets[split] = datasets.ImageFolder(split_root,
                                                 data_transforms[split])

# Per-sample weights for each split, compensating for class imbalance.
weights_dict = {}
for split in ['train', 'val']:
    ds = image_datasets[split]
    weights_dict[split] = make_weights_for_balanced_classes(
        ds.imgs, len(ds.classes))

# One WeightedRandomSampler per split; each epoch draws len(weights) samples.
sampler_dict = {}
for split in ['train', 'val']:
    # Build the weight tensor once — the original constructed an identical
    # DoubleTensor a second time just to take its length.
    split_weights = torch.DoubleTensor(weights_dict[split])
    sampler_dict[split] = torch.utils.data.sampler.WeightedRandomSampler(
        split_weights, len(split_weights))

# DataLoaders driven by the weighted samplers above.
dataloaders_dict_sampler = {}
for split in ['train', 'val']:
    dataloaders_dict_sampler[split] = torch.utils.data.DataLoader(
        image_datasets[split],
        batch_size=batch_size,
        shuffle=False,  # shuffling must be off when a sampler is supplied
        sampler=sampler_dict[split],
        num_workers=8)

dataloaders_dict = {
    x: torch.utils.data.DataLoader(image_datasets[x],
示例#28
0
offset = 0

# NOTE(review): n was first set to 40336 and immediately overwritten; the
# effective dataset size is 20000 (dead store removed).
n = 20000
split = 0.8  # fraction of indices used for training
ind = list(range(n))
div = int(n * split)
# Contiguous 80/20 train/validation split over the sample indices.
# (`dict([])` and the redundant list() copies of list slices simplified.)
partition = {
    'train': ind[:div],
    'validation': ind[div:],
}



'''TCN'''
# Temporal convolutional network with a class-weighted BCE loss, plain SGD,
# and learning-rate decay on validation-loss plateaus.
model = TCN(40, 1, args.layers, fcl=args.fcl, kernel_size=2,
            dropout=args.drop).to(args.device)
pos_weight = torch.DoubleTensor([1.8224]).to(args.device)
criterion = nn.BCEWithLogitsLoss(pos_weight=pos_weight, reduction='none')
optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, verbose=True,
                                                 factor=0.1, patience=5)

if load_model:
    model.load_state_dict(torch.load(load_model))
    train_losses = np.concatenate((np.load(load_folder +'/train_losses.npy'), np.zeros(epochs)))
    train_pos_acc = np.concatenate((np.load(load_folder +'/train_pos_acc.npy'), np.zeros(epochs)))
    train_neg_acc = np.concatenate((np.load(load_folder +'/train_neg_acc.npy'), np.zeros(epochs)))
    val_losses = np.concatenate((np.load(load_folder +'/val_losses.npy'), np.zeros(epochs)))
    val_pos_acc = np.concatenate((np.load(load_folder +'/val_pos_acc.npy'), np.zeros(epochs)))
    val_neg_acc = np.concatenate((np.load(load_folder +'/val_neg_acc.npy'), np.zeros(epochs)))
    utility = np.concatenate((np.load(load_folder +'/utility.npy'), np.zeros(epochs)))
    plotter(model_name, utility, train_losses, train_pos_acc, train_neg_acc,
        val_losses, val_pos_acc, val_neg_acc)
else:
示例#29
0
def count_matmul(m, x, y):
    """FLOP-counting hook for matmul modules; deliberately records zero ops.

    The real multiply count is computed but unused — the accounting line is
    kept commented out below, matching the original intent.
    """
    mul_count = x[0].numel() * x[1].size(-1)  # would-be multiply count
    # m.total_ops += torch.DoubleTensor([int(mul_count)])
    m.total_ops += torch.DoubleTensor([0])
示例#30
0
# Grid and market configuration for the calibration run.
n_steps = 40
tdim = 5    # time-grid size
sdim = 10   # space/strike-grid size
vol_floor = torch.zeros(sdim, tdim, dtype=torch.float64)
# Flat 0.2 vol surface; built in default dtype then widened to double so the
# stored value matches the original float32 literal exactly.
vol = torch.full((sdim, tdim), 0.2).double()
m = sdim * tdim
n = 11      # number of strikes / instruments
r = 0.02    # risk-free rate

###------------------------- Calculating Data -------------------------###

# Price an 11-strike ladder with the Bachelier model, then invert each price
# back to an implied volatility.
iv = torch.zeros(n)
spot = 35.0  # redundant float(35.) cast removed; same value
expiries = np.array([0.25] * n)
strike = np.linspace(30., 40., n).astype(float)
vals = torch.DoubleTensor(f.Bachelier(spot, 7., expiries, strike, r))

# enumerate() replaces the C-style `for i in range(0, vals.size()[0])` loop;
# each `val` is exactly vals[i].
for i, val in enumerate(vals):
    iv[i] = f.impliedVol(spot, val, expiries[i], strike[i], r)

###------------------------- ADAM Parameters -------------------------###
expiries = torch.tensor(expiries)

# Adam optimizer state: decay rates and zero-initialized moment accumulators.
beta1, beta2 = 0.9, 0.999  # exponential-decay rates for the moment estimates
mt, vt = 0.0, 0.0          # running first and second moments
vhat = torch.zeros(sdim, tdim, dtype=torch.float64)
LearningRate = 0.005

#