Example #1
 def test_time_distributed_reshapes_correctly(self):
     char_embedding = Embedding(2, 2)
     char_embedding.weight = Parameter(torch.FloatTensor([[.4, .4], [.5, .5]]))
     distributed_embedding = TimeDistributed(char_embedding)
     char_input = torch.LongTensor([[[1, 0], [1, 1]]])
     output = distributed_embedding(char_input)
     assert_almost_equal(output.data.numpy(),
                         [[[[.5, .5], [.4, .4]], [[.5, .5], [.5, .5]]]])
Example #2
 def test_time_distributed_reshapes_positional_kwarg_correctly(self):
     char_embedding = Embedding(2, 2)
     char_embedding.weight = Parameter(torch.FloatTensor([[.4, .4], [.5, .5]]))
     distributed_embedding = TimeDistributed(char_embedding)
     char_input = torch.LongTensor([[[1, 0], [1, 1]]])
     output = distributed_embedding(input=char_input)
     assert_almost_equal(output.data.numpy(),
                         [[[[.5, .5], [.4, .4]], [[.5, .5], [.5, .5]]]])
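Examples #1 and #2 exercise a TimeDistributed wrapper (as in AllenNLP), which flattens the batch and time dimensions, applies the wrapped module, and restores the original shape. A rough, hedged sketch of that behaviour follows; it is not the library's actual implementation, and the real wrapper also forwards keyword arguments, which is what Example #2 tests.

import torch
from torch import nn

class TimeDistributed(nn.Module):
    """Sketch: squash (batch, time, ...) to (batch * time, ...), apply the module, reshape back."""
    def __init__(self, module: nn.Module):
        super().__init__()
        self._module = module

    def forward(self, inputs: torch.Tensor) -> torch.Tensor:
        batch, time = inputs.shape[0], inputs.shape[1]
        squashed = inputs.reshape(batch * time, *inputs.shape[2:])
        out = self._module(squashed)
        return out.reshape(batch, time, *out.shape[1:])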
Example #3
        def __copyEmbeddings(embeddings: nn.Embedding, index1, index2):
            # Copy the embedding vector at row index1 into row index2,
            # then re-wrap the modified matrix as a trainable Parameter.
            weight = embeddings.weight.detach().numpy()

            weight[index2] = deepcopy(weight[index1])

            weight = torch.tensor(weight, requires_grad=True)
            embeddings.weight = nn.Parameter(weight, requires_grad=True)
Example #4
def run_cora():
    np.random.seed(1)
    random.seed(1)
    num_nodes = 2708
    feat_data, labels, adj_lists = load_cora()
    # Hold the node features in a frozen Embedding so they can be looked up by node id
    # but are not updated during training.
    features = Embedding(2708, 1433)
    features.weight = Parameter(torch.tensor(feat_data, dtype=torch.float),
                                requires_grad=False)

    # Two-layer GraphSAGE encoder: mean-aggregate sampled neighbour features at each hop.
    agg1 = MeanAggregator(features)
    enc1 = Encoder(features, 1433, 128, adj_lists, agg1, gcn=True)
    agg2 = MeanAggregator(lambda nodes: enc1(nodes).t())
    enc2 = Encoder(lambda nodes: enc1(nodes).t(),
                   enc1.embed_dim,
                   128,
                   adj_lists,
                   agg2,
                   base_model=enc1,
                   gcn=True)
    enc1.num_samples = 5
    enc2.num_samples = 5

    graphsage = SupervisedGraphSage(7, enc2)
    rand_indices = np.random.permutation(num_nodes)
    test = rand_indices[:1000]
    val = rand_indices[1000:1500]
    train = list(rand_indices[1500:])

    all_grad_params = filter(lambda p: p.requires_grad, graphsage.parameters())
    optimizer = torch.optim.SGD(all_grad_params, lr=0.7)
    times = []
    for batch in range(100):
        # Take a mini-batch of 256 training nodes, then reshuffle for the next iteration.
        batch_nodes = train[:256]
        random.shuffle(train)
        start_time = time.time()
        optimizer.zero_grad()
        loss = graphsage.loss(batch_nodes,
                              torch.tensor(labels[np.array(batch_nodes)]))
        loss.backward()
        optimizer.step()
        end_time = time.time()
        times.append(end_time - start_time)
        print(batch, loss.item())

    val_output = graphsage.forward(val)
    print(
        "Validation F1:",
        f1_score(labels[val],
                 val_output.data.numpy().argmax(axis=1),
                 average="micro"))
    print("Average batch time:", np.mean(times))
Example #5
def update_weights(emb: nn.Embedding, weights: Tensor) -> None:
    emb.weight = nn.Parameter(weights)
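Example #5 captures the pattern shared by all of the snippets here: wrapping a tensor in nn.Parameter and assigning it to the Embedding's weight attribute. A minimal usage sketch with made-up sizes (the dimensions and values are purely illustrative):

import torch
from torch import nn

def update_weights(emb: nn.Embedding, weights: torch.Tensor) -> None:
    emb.weight = nn.Parameter(weights)

pretrained = torch.randn(10, 4)             # hypothetical pretrained vectors
emb = nn.Embedding(10, 4)
update_weights(emb, pretrained)             # emb.weight now wraps `pretrained`
print(emb(torch.tensor([0, 3])).shape)      # torch.Size([2, 4])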
Example #6
max_length = 20

dataset = DyadDataset()
dyad_lut = dataset.get_dyad_lut()

embs_path = DATA_PATH + EMBS_FILE
cuda = True

with open(embs_path, 'rb') as f:
    emb_params = pickle.load(f)
num_entities, dim = emb_params.size()

emb = Embedding(num_entities, dim)
status_emb = StatusEmb(dim, dyad_lut, cuda)

# Initialise the entity lookup table with the pretrained vectors loaded above.
emb.weight = Parameter(emb_params)

encoder = DyadEncoder(emb, dim, cuda=cuda)
decoder = DyadDecoder(emb, dim, num_entities, cuda=cuda)

encoder_optim = torch.optim.Adam(encoder.parameters(), lr=10e-4)
decoder_optim = torch.optim.Adam(decoder.parameters(), lr=10e-4)
status_optim = torch.optim.Adam(status_emb.parameters(), lr=10e-4)

SOS_token = 0
criterion = torch.nn.NLLLoss()
i = 0
for item in dataset:
    dyad, acts = item

    if len(acts) > 1:
Example #7
import pickle
import random

import torch
from torch.nn import Embedding, Parameter
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
from torch.nn.utils.rnn import pack_padded_sequence
from sklearn.metrics import r2_score

EMBS_FILE = 'embs.pickle'
BATCH_SIZE = 1000
CUDA = True
NUM_EPOCHS = 2

with open(EMBS_FILE, 'rb') as f:
    emb_params = pickle.load(f)
num_entities, dim = emb_params.size()
emb = Embedding(num_entities, dim, padding_idx=0)
emb.weight = Parameter(emb_params, requires_grad=True)


data = []
lstm = torch.nn.LSTM(100, 100, bidirectional=True)
model = torch.nn.Sequential(
    #torch.nn.Linear(10, 10),
    #torch.nn.LeakyReLU(),
    torch.nn.Dropout(),
    torch.nn.Linear(100, 30),
    torch.nn.LeakyReLU(),
    #torch.nn.Linear(30, 10),
    #torch.nn.LeakyReLU(),
    torch.nn.Linear(30, 2), 
    # torch.nn.Softplus(),
)