Example no. 1
0
def entity_build(request):
    """Start a build of one entity for the site named in the request.

    Reads the site code and entity id from the route match dict, checks the
    caller's access via ``verify_access`` (which also resolves the site
    record), then triggers ``Entity(request).build()``.

    :param request: Pyramid-style request with ``matchdict['code']`` and
        ``matchdict['id']``.
    :returns: dict with ``started`` flag, resolved site ``name``, and the
        ``entity`` id that was built.
    """
    params = request.matchdict
    code = params['code']
    entity_id = params['id']
    # verify_access both authorizes the caller and swaps the raw site code
    # for the full site record.
    claims, site = verify_access(request, site=code)
    Entity(request).build()
    return { 'started': True, 'name': site['name'], 'entity': entity_id }
Example no. 2
0
def entity_build(request):
    """Kick off an entity build for the requested site.

    Pulls ``code`` and ``id`` out of the route match dict, authorizes the
    request through ``verify_access`` (which returns the claims and the
    resolved site record), and runs the build.

    :param request: request object exposing ``matchdict``.
    :returns: ``{'started': True, 'name': <site name>, 'entity': <id>}``.
    """
    matchdict = request.matchdict
    site_code, entity_id = matchdict['code'], matchdict['id']
    claims, site = verify_access(request, site=site_code)
    builder = Entity(request)
    builder.build()
    result = {'started': True, 'name': site['name'], 'entity': entity_id}
    return result
Example no. 3
0
def _extract(params, data):
    """Train or run prediction with the Entity model on hate-labeled batches.

    Filters each split down to examples whose ``labels`` field equals 1,
    derives inverse-frequency class weights for the target (8 classes) and
    action (4 classes) heads, builds the model, and then — depending on the
    module-level ``args.goal`` — either trains it or writes predictions to
    pickle files under ``Data/<dataset>/``.

    :param params: hyper-parameter dict; must contain ``"batch_size"``.
    :param data: 5-tuple of (train_batches, dev_batches, test_batches,
        vocabs, embedding); each batch is a dict with at least ``"labels"``,
        ``"target_label"``, and ``"action_label"`` keys — presumably ints,
        TODO confirm against the data loader.
    :returns: None; side effects are model training or pickle files.
    """
    train_batches, dev_batches, test_batches, vocabs, embedding = data
    hate_train_batches = [train for train in train_batches if train["labels"] == 1]
    hate_dev_batches = [dev for dev in dev_batches if dev["labels"] == 1]
    hate_test_batches = [test for test in test_batches if test["labels"] == 1]

    # Build each frequency counter once instead of once per class index
    # (the original recomputed Counter(...) inside the comprehension).
    # NOTE(review): raises ZeroDivisionError if no hate-labeled train
    # batches exist, same as the original behavior.
    n_hate = len(hate_train_batches)
    target_counts = Counter(train["target_label"] for train in hate_train_batches)
    action_counts = Counter(train["action_label"] for train in hate_train_batches)
    t_weights = np.array([1 - target_counts[i] / n_hate for i in range(8)])
    a_weights = np.array([1 - action_counts[i] / n_hate for i in range(4)])

    entity = Entity(params, vocabs, embedding)
    entity.build()
    if args.goal == "train":
        entity.run_model(BatchIt(hate_train_batches, params["batch_size"], vocabs),
                         BatchIt(hate_dev_batches, params["batch_size"], vocabs),
                         BatchIt(hate_test_batches, params["batch_size"], vocabs),
                         (t_weights, a_weights))
    elif args.goal == "predict":
        unlabeled_batches = _load_unlabeled(params, args, vocabs)
        target, action = entity.predict(unlabeled_batches, (t_weights, a_weights))
        # Use context managers so the pickle files are flushed and closed
        # even if dump() raises (the original leaked the file handles).
        with open("Data/" + args.dataset + "/targets.pkl", "wb") as f:
            pickle.dump(target, f)
        with open("Data/" + args.dataset + "/actions.pkl", "wb") as f:
            pickle.dump(action, f)