Example #1
    model.apply(init_weights)
    print(f'The model has {count_parameters(model):,} trainable parameters')

    optimizer = optim.Adam(model.parameters())

    # alternative: class-weighted cross-entropy (kept here commented out)
    # weight_CE = torch.FloatTensor([0.1, 1, 1, 1, 1, 1, 1, 1, 1, 1]).to(device)
    # criterion = nn.CrossEntropyLoss(weight=weight_CE)
    criterion = nn.CrossEntropyLoss()

    # build batched iterators over the train / test JSON files
    train_paths = glob.glob("data/train/*.json")
    test_paths = glob.glob("data/test/*.json")
    train_steps = len(train_paths) // config.s2s_batch_size + 1
    test_steps = len(test_paths) // config.s2s_batch_size + 1
    train_iterator = gen(train_paths, config.s2s_batch_size,
                         config.max_box_num, device)
    test_iterator = gen(test_paths, config.s2s_batch_size, config.max_box_num,
                        device)

    best_valid_loss = float('inf')

    for epoch in range(config.s2s_epoch):

        start_time = time.time()
        train_loss, train_acc = train(model, train_steps, train_iterator,
                                      optimizer, criterion, config.s2s_clip)
        valid_loss, valid_acc = evaluate(model, test_steps, test_iterator,
                                         criterion)

        end_time = time.time()
        epoch_mins, epoch_secs = epoch_time(start_time, end_time)
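
The snippet above calls several helpers (count_parameters, init_weights, epoch_time) that are not shown, and it stops right after timing the epoch. A minimal sketch of what such helpers commonly look like in PyTorch seq2seq training code is given below; the project's actual definitions may differ.

import torch
import torch.nn as nn


def count_parameters(model):
    # total number of trainable parameters
    return sum(p.numel() for p in model.parameters() if p.requires_grad)


def init_weights(m):
    # simple uniform initialisation, applied with model.apply(init_weights)
    for name, param in m.named_parameters():
        nn.init.uniform_(param.data, -0.08, 0.08)


def epoch_time(start_time, end_time):
    # split an elapsed wall-clock duration into whole minutes and seconds
    elapsed = end_time - start_time
    mins = int(elapsed / 60)
    secs = int(elapsed - mins * 60)
    return mins, secs

Given best_valid_loss above and the weight/s2s.pt checkpoint loaded in Examples #2 and #3, the epoch loop presumably ends with a checkpoint step along these lines (an assumption; it is not shown in the source):

        if valid_loss < best_valid_loss:
            best_valid_loss = valid_loss
            torch.save(model.state_dict(), 'weight/s2s.pt')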
Example #2
    attn = Attention(config.s2s_enc_hid, config.s2s_dec_hid)

    enc = Encoder(config.s2s_emb_dim, config.s2s_enc_hid, config.s2s_dec_hid,
                  config.s2s_enc_dropout)

    dec = Decoder(len(config.class_char), config.s2s_emb_dim,
                  config.s2s_enc_hid, config.s2s_dec_hid,
                  config.s2s_enc_dropout, attn)

    model = Seq2Seq(enc, dec, device).to(device)

    model.apply(init_weights)
    model.load_state_dict(torch.load('weight/s2s.pt'))
    model.eval()

    data = gen(["data/test/96.json"], 1, config.max_box_num, device)

    with torch.no_grad():
        src, trg = next(data)
        output = model(src)
        # swap the first two dims and flatten to (batch * seq, n_classes)
        output = output.permute(1, 0, 2).contiguous().view(-1, len(config.class_char))
        output = torch.max(F.softmax(output, dim=1), 1)
        possible, label = output.values, output.indices
        acc = np.mean((label == trg).cpu().numpy())

        print("trget:", trg.long())
        print("label:", label)
        print("possible:", possible)
        print("acc:", acc)
Example #3
    attn = Attention(config.s2s_enc_hid, config.s2s_dec_hid)

    enc = Encoder(config.s2s_emb_dim, config.s2s_enc_hid, config.s2s_dec_hid,
                  config.s2s_enc_dropout)

    dec = Decoder(len(config.class_char), config.s2s_emb_dim,
                  config.s2s_enc_hid, config.s2s_dec_hid,
                  config.s2s_enc_dropout, attn)

    model = Seq2Seq(enc, dec, device).to(device)

    model.apply(init_weights)
    model.load_state_dict(torch.load('weight/s2s.pt'))
    model.eval()

    data = gen(["data/test/OTDR_type2_64.json"], 1, config.max_box_num, device)

    with torch.no_grad():
        src, trg = next(data)
        output = model(src)
        # swap the first two dims and flatten to (batch * seq, n_classes)
        output = output.permute(1, 0, 2).contiguous().view(-1, len(config.class_char))
        output = torch.max(F.softmax(output, dim=1), 1)
        possible, label = output.values, output.indices
        acc = np.mean((label == trg).cpu().numpy())

        print("trget:", trg.long())
        print("label:", label)
        print("possible:", possible)
        print("acc:", acc)