        num_layers=num_layers, logit_class_weights=[1, 100 * c],
        count_class_weights=[1, 3])

batch_size = 64
open_every = 10
save_every = 500
log_every = 10
name = "{}-{}_{}".format(str(net), env_type, s)
c2 = 1

#%%
for i in range(10000000):
    # Periodically load a fresh file of training data.
    if i % open_every == 0:
        X, Y, GN = open_file(env_type=env_type, open_GN=True, open_A=False)
        SS = np.concatenate([X, GN], 2).transpose((1, 0, 2))  # (time, sample, feature)
        n = SS.shape[1]

    # Sample a minibatch; sequences are zero-padded, so a sequence's length is
    # its number of timesteps with any nonzero feature.
    idx = np.random.choice(n, size=batch_size)
    inputs = SS[:, idx, :]
    ytrue = Y[idx]
    lens = inputs.any(2).sum(0)

    # Fraction of positive labels among the real (non-padding) timesteps.
    avg_ones = np.hstack([Y[k, :l, 0] for k, l in zip(idx, lens)]).mean()
    if avg_ones > 0:
        # Reweight the positive class inversely to its frequency. The source
        # was cut off mid-call here; [1, w] is the assumed weight vector.
        w = c / avg_ones
        net.logit_loss_fn = nn.CrossEntropyLoss(reduce=False,
                                                weight=torch.FloatTensor([1, w]))
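#%%
# Hedged sketch, separate from the training script above: shows on a toy batch
# how the loop recovers sequence lengths from zero-padding and turns the
# positive-label rate into the class weight w = c / avg_ones. The shapes and
# the all-zeros padding convention are assumptions inferred from the loop.
import numpy as np

c = 1
toy = np.zeros((5, 2, 3))             # (time, batch, feature), zero-padded
toy[:4, 0] = 1.0                      # sequence 0 has 4 real timesteps
toy[:2, 1] = 1.0                      # sequence 1 has 2
toy_lens = toy.any(2).sum(0)          # -> array([4, 2])

toy_Y = np.zeros((2, 5, 1), dtype=int)   # (batch, time, 1) binary labels
toy_Y[0, :4, 0] = [0, 1, 0, 0]
toy_Y[1, :2, 0] = [1, 0]
avg_ones = np.hstack([toy_Y[k, :l, 0] for k, l in enumerate(toy_lens)]).mean()
w = c / avg_ones                      # 1 / (2/6) = 3: rare positives weigh more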
        dropout_prob=.4)

name = "{}-{}_{}".format(str(net), env_type, s)
c = 1
c2 = .25

#%%
for i in range(int(1e8)):
    if i % open_every == 0:
        print("new file!")
        # The adjacency tensor A is always loaded; the global-node features GN
        # are concatenated onto X only when use_gn is set.
        if use_gn:
            A, X, GN, Y = open_file(env_type=env_type, open_A=True, open_GN=True)
            Z = np.concatenate([X, GN], 2)
        else:
            A, X, Y = open_file(env_type=env_type, open_A=True, open_GN=False)
            Z = X
        n = A.shape[0]

    # Sample a minibatch of (adjacency, features) pairs; lengths are counted
    # along the time axis of the zero-padded feature tensor.
    idx = np.random.choice(n, size=batch_size)
    inputs = (A[idx], Z[idx])
    ytrue = Y[idx]
    lens = inputs[1].any(2).sum(1)
    avg_ones = np.hstack([Y[k, :l, 0] for k, l in zip(idx, lens)]).mean()
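#%%
# Hedged sketch of how the unreduced loss these scripts configure
# (nn.CrossEntropyLoss(reduce=False, ...)) is typically combined with lens to
# ignore padded timesteps. The mask construction here is an assumption about
# the downstream use, not code from the repo.
import torch
import torch.nn as nn

loss_fn = nn.CrossEntropyLoss(reduce=False, weight=torch.FloatTensor([1.0, 3.0]))
logits = torch.randn(2, 2, 5)                # (batch, class, time)
targets = torch.randint(0, 2, (2, 5))        # (batch, time)
per_step = loss_fn(logits, targets)          # (batch, time), one loss per step

lens = torch.tensor([4, 2])
mask = (torch.arange(5).unsqueeze(0) < lens.unsqueeze(1)).float()
loss = (per_step * mask).sum() / mask.sum()  # average over real timesteps only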
decoder_hidden_size = 100
bidirectional = True
num_layers = 1
batch_size = 32

rnn = torch.load("results/policy_function_lstm")

open_every = 100
log_every = 10
save_every = 500

#%%
for i_iter in range(1000):
    if i_iter % open_every == 0:
        X, Y, GN = open_file(open_GN=True, open_A=False)
        SS = np.concatenate([X, GN], 2).transpose((1, 0, 2))  # (time, sample, feature)
        n = SS.shape[1]

    num_ys = None
    optimizer.zero_grad()
    idx = np.random.choice(n, size=batch_size)
    subset = SS[:, idx, :]
    # Index of the first all-zero (padding) timestep per sequence; note this
    # reports 0 for sequences that fill the whole window.
    lens = np.argmax(subset.any(2) == 0, 0)
    inputs = Variable(torch.FloatTensor(subset), requires_grad=False)
    # Sort batch elements by decreasing length, as packed sequences require.
    order = np.flip(np.argsort(lens), 0).astype(int)
    order_t = torch.LongTensor(order)
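#%%
# Hedged sketch of what the decreasing-length sort above is usually preparing
# for: torch.nn.utils.rnn.pack_padded_sequence, which (in the PyTorch of this
# era) required the batch ordered by descending length. Dimensions are
# illustrative; the real model is the loaded policy_function_lstm.
import numpy as np
import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence

subset = np.zeros((5, 3, 4), dtype=np.float32)   # (time, batch, feature)
for j, l in enumerate([5, 2, 3]):
    subset[:l, j] = 1.0
lens = np.argmax(subset.any(2) == 0, 0)
lens[lens == 0] = subset.shape[0]  # argmax reports 0 for sequences with no padding

order = np.flip(np.argsort(lens), 0).astype(int).copy()
packed = pack_padded_sequence(torch.from_numpy(subset[:, order, :]),
                              lens[order].tolist())
out, _ = nn.LSTM(input_size=4, hidden_size=8)(packed)
padded, out_lens = pad_packed_sequence(out)      # back to (time, batch, hidden)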