Example No. 1
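# Template matching: learn a reference template for each character class from the
# training folder, then label every segmented glyph with the index of the nearest
# template (smallest calDistance).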
def main():
    str1 = input("Please input path:")
    template = train.learn(r'C:\Users\hp\Desktop\code\code\easy\train')
    with open('result.csv', 'w', newline='') as f:
        writer = csv.writer(f)
        writer.writerow(('name', 'code'))
        for home, dir, files in os.walk(str1):
            for filename in files:
                numList = pictureProcess.imageProcess(
                    os.path.join(home, filename))
                result = ''
                for i in numList:
                    distance = []
                    for j in template:
                        distance.append(calDistance(i, j))
                    result += str(distance.index(min(distance)))
                writer.writerow((filename, result))
Example No. 2
# k-NN variant: classify each glyph by a majority vote over the three nearest training samples.
def main():
    all = learn(r'C:\Users\hp\Desktop\code\code\hard\train')
    str1 = input("Please input path:")
    with open('result.csv', 'w', newline='') as f:
        writer = csv.writer(f)
        writer.writerow(('name', 'code'))
        for home, dir, files in os.walk(str1):
            for filename in files:
                info = preProcess(os.path.join(home, filename))
                answer = ''
                for i in info:
                    # Distance from this glyph to every labelled training sample.
                    distance = []
                    for j in all:
                        distance.append(calDistance(j[0], i))
                    # Keep the three nearest neighbours as (distance, label) pairs.
                    vote = []
                    for _ in range(3):
                        index = distance.index(min(distance))
                        vote.append([distance[index], all[index][1]])
                        distance[index] = 1000
                    # Majority vote among the three labels; fall back to the nearest one.
                    if vote[0][1] == vote[1][1] or vote[0][1] == vote[2][1]:
                        answer += str(vote[0][1])
                    elif vote[1][1] == vote[2][1]:
                        answer += str(vote[1][1])
                    else:
                        answer += str(vote[0][1])
                writer.writerow((filename, answer))
Example No. 3
def match(query):
    res = matchQuerySkipgram(bootstrapQuery(query),learn())
    return res
Example No. 4
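# Fallback branch: segment the raw ECG records around detected QRS complexes and
# cache the result as ./data/segment.npy.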
except:
    signals, labels = get_ecg(PATH, length=LENGTH)
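    # Preallocated buffer: 245990 segments, each 1000 samples plus a trailing label column.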
    segments = np.zeros((245990, 1001))
    k = 0

    for i, record in enumerate(signals):
        rp = qrs_detection(record, sample_rate=FS)
        seg = get_segments(record, rp, labels[i])
        if seg is not None:
            segments[k:k + seg.shape[0], :] = seg
            k += seg.shape[0]
    del signals, labels

    np.save('./data/segment.npy', segments)

X, y = segments[:, :-1], segments[:, -1][:, np.newaxis]
del segments

train, test = build_dataloader(X, y, resamp=RESAMP, batch_size=BATCH_SIZE)
del X, y

net = cnn_feed_lstm()
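# Resume from a saved checkpoint when available; otherwise keep the freshly initialised weights.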
try:
    params = torch.load("../params/net_0.81.pkl")
    net.load_state_dict(params["model_state_dict"])
except:
    pass

loss, val_score = learn(net, train, test, lr=LR, epoch=EPOCH)
plot(loss, val_score)
Example No. 5
        "optimizer": optimizer_name,
        "loss": loss_name,
        "scheduler": scheduler_name,
        "tolerance_es": tolerance,
        "delta_es": delta,
        "gamma_scheduler": gamma_scheduler,
        "top_k": top_k,
        "num_warmup": num_warmup_steps,
        "lr_layer_decay": lr_layerdecay,
        "freeze": freeze,
        "alpha_link": alpha_link,
        "comment": comment,
    }
    # Modules used in the model
    modules = {
        "model": model,
        "optimizer": optimizer,
        "scheduler": scheduler,
        "loss_function": loss_function,
        "device": device,
        "early_stopper": early_stopper,
    }
    # Train the model
    learn(
        train_dataloader,
        val_dataloader,
        modules,
        hyperparameters,
        experiment_name="seed",
    )
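Example No. 6
    # Attention-based multi-label classifier: each epoch calls learn() for one training
    # pass, then evaluate() for validation loss, recall and precision; the per-epoch
    # metrics are saved after the loop and the model is saved even if training is
    # interrupted with Ctrl+C.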
    attention = Attention(attention_dim, attention_dim, attention_dim)
    model = Classifier(embedding, encoder, attention, attention_dim, nlabels)
    model.to(device)

    criterion = nn.MultiLabelSoftMarginLoss()
    optimizer = torch.optim.Adam(model.parameters(),
                                 model_argument['lr'],
                                 amsgrad=True)

    R = []
    P = []
    try:
        best_valid_loss = None

        for epoch in range(1, model_argument['epochs'] + 1):
            loss_train = learn(model, train_iter, optimizer, criterion)
            print("[{} loss]: {:.5f}".format('Train', loss_train))
            loss, r, p = evaluate(model, test_iter, criterion)
            R.append(r)
            P.append(p)

            if not best_valid_loss or loss < best_valid_loss:
                best_valid_loss = loss

        np.save('recall.npy', R)
        np.save('precision.npy', P)

    except KeyboardInterrupt:
        print("[Ctrl+C] Training stopped!")

    torch.save(model, 'training_history/mlc_20180903.pt')
Example No. 8
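# Multi-agent RL setup: one recurrent actor-critic policy and a matching RMSprop
# optimizer per drone (an earlier train()-based setup is left commented out below).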
#     for _ in range(n_drones)
# ]
# optimizers = [optim.AdamW(nets[i].parameters(), lr=args.lr) for i in range(n_drones)]

# train(
#     args=args,
#     nets=nets,
#     optimizers=optimizers,
#     env=env,
#     obs_size=state_size,
#     n_drones=n_drones,
# )


actor_critics = [
    Policy((state_size + 2,), (action_size,), base_kwargs={"recurrent": True})
    for _ in range(n_drones)
]
optimizers = [
    optim.RMSprop(actor_critics[i].parameters(), lr=args.lr) for i in range(n_drones)
]

learn(
    args=args,
    actor_critics=actor_critics,
    optimizers=optimizers,
    env=env,
    obs_size=state_size,
    n_drones=n_drones,
)
Example No. 9
                               [nn.Softplus()] * nmlp + [None])
        for _ in range(nlayers)
    ], [
        utils.SimpleMLPreshape(
            [num] + [hidden] * nmlp + [num],
            [nn.Softplus()] * nmlp + [utils.ScalableTanh(num)])
        for _ in range(nlayers)
    ])
    return fl


from utils import flowBuilder
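# Assemble the flow model with flowBuilder, then fit it to the target with train.learn
# (losses recorded every saveSteps=lossPlotStep steps under savePath=rootFolder).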

f = flowBuilder(n,
                numFlow,
                innerBuilder,
                1,
                relax=args.relax,
                shift=args.shift).to(device)

if not args.double:
    f = f.to(torch.float32)

LOSS = train.learn(target,
                   f,
                   batchSize,
                   epochs,
                   lr,
                   saveSteps=lossPlotStep,
                   savePath=rootFolder)