def experiment(epochs=10, checkpoint=1, train_datapoints=train_datapoints):
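    """Train on an 85/15 split of train_datapoints: preview one random
    prediction batch from the held-out feed, then run the trainer."""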
    model = Model(Config(), len(INPUT_VOCAB), len(OUTPUT_VOCAB))
    if Config().cuda: model = model.cuda()

    split_index = int(len(train_datapoints) * 0.85)
    train_feed = DataFeed(train_datapoints[:split_index],
                          batchop=batchop,
                          batch_size=128)
    test_feed = DataFeed(train_datapoints[split_index:],
                         batchop=batchop,
                         batch_size=120)

    trainer = Trainer(model=model,
                      loss_function=loss,
                      accuracy_function=accuracy,
                      checkpoint=checkpoint,
                      epochs=epochs,
                      feeder=Feeder(train_feed, test_feed))

    predictor = Predictor(model=model,
                          repr_function=repr_function,
                          feed=test_feed)

    for e in range(1):  # one preview-then-train round
        output, results = predictor.predict(
            random.choice(range(test_feed.num_batch)))
        display(HTML(results._repr_html_()))
        del output, results
        trainer.train()
Example No. 2
    def test_predictor(self):
        dataset = CustomDataset("data/")
        image = dataset.get_item(dataset.get_list()[0]).image

        predictor = Predictor()

        area = predictor(image)

        self.assertIsInstance(area, int)
        self.assertGreater(area, 0)
        self.assertLessEqual(area, image.shape[0] * image.shape[1])

        print(area)
Example No. 3
from argparse import ArgumentParser
from trainer import Predictor

parser = ArgumentParser()
parser.add_argument("--userid", type=str)
parser.add_argument("--imgurl", type=str)
parser.add_argument("--train", action='store_true')
parser.add_argument("--add", action='store_true')
parser.add_argument("--is_pos", action='store_true')
args = parser.parse_args()
userid = args.userid
imgurl = args.imgurl
predictor = Predictor(userid)

pos_file = "../data/" + userid + ".pos"
neg_file = "../data/all_data"
# Assuming the pos file has been created by Javascript
if args.add:
	predictor.add_to_training_set(imgurl, args.is_pos)
elif args.train:
	with open(pos_file, "r") as pos, open(neg_file, "r") as neg:
		pos_data = pos.readlines()
		neg_data = neg.readlines()
	predictor.train(pos_data, neg_data)
else:
	prediction = predictor.predict(imgurl)
	if prediction:
		with open(pos_file, "a") as pos:
			pos.write(imgurl + "\n")
	with open(neg_file, "a") as neg:
		neg.write(imgurl + "\n")
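
# Hypothetical invocations (script name and URLs are assumptions):
#   python predictor_cli.py --userid u42 --imgurl http://example.com/a.jpg --add --is_pos
#   python predictor_cli.py --userid u42 --train
#   python predictor_cli.py --userid u42 --imgurl http://example.com/b.jpg
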
def experiment(eons=1000, epochs=10, checkpoint=5):
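    """Train one BiLSTMDecoderModel over multiplexed per-label feeds,
    dumping sample predictions every eon and full test-set predictions on
    every eon after the first; resumes from attn_model.pth when it exists
    and saves back to it on failure or interrupt."""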
    try:
        try:
            model = BiLSTMDecoderModel(Config(), len(INPUT_VOCAB), len(CHAR_VOCAB), len(OUTPUT_VOCAB))
            if Config().cuda: model = model.cuda()
            model.load_state_dict(torch.load('attn_model.pth'))
            log.info('loaded the old image for the model')
        except Exception:
            log.exception('failed to load the model')
            model = BiLSTMDecoderModel(Config(), len(INPUT_VOCAB), len(CHAR_VOCAB), len(OUTPUT_VOCAB))
            if Config().cuda: model = model.cuda()

        print('**** the model', model)

        train_feed, test_feed, predictor_feed = {}, {}, {}
        trainer, predictor = {}, {}

        max_size = max(sorted([len(i[0]) for i in classified_datapoints.values()])[:-1])
        # max_size = max(sorted([len(i[0]) for i in classified_datapoints.values()]))
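        # The [:-1] slice drops the largest bucket, presumably so the
        # dominant (likely neutral) class doesn't set the pace for the rest.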
        
        for label in classified_datapoints.keys():
            if len(classified_datapoints[label][0]) < 1: continue

            label_desc = '-'.join(OUTPUT_VOCAB[i] for i, x in enumerate(label) if x == 1)
            print('label: {} and size: {}'.format(label, len(classified_datapoints[label][0])))
            train_feed[label] = DataFeed(label_desc, classified_datapoints[label][0], batchop=batchop,
                                         batch_size=max(128, int(len(classified_datapoints[label][0]) / 600)))
            test_feed[label] = DataFeed(label_desc, classified_datapoints[label][1], batchop=batchop, batch_size=32)
            predictor_feed[label] = DataFeed(label_desc, classified_datapoints[label][1], batchop=batchop, batch_size=12)
            
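            # turns: roughly how many passes this class needs to see as many
            # samples as the largest kept class; only consumed by the
            # per-label loop that is commented out below.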
            turns = int(max_size / train_feed[label].size) + 1
            trainer[label] = Trainer(name=label_desc,
                                     model=model,
                                     loss_function=partial(loss, scale=1),
                                     accuracy_function=accuracy,
                                     f1score_function=f1score_function,
                                     checkpoint=checkpoint, epochs=epochs,
                                     feeder=Feeder(train_feed[label], test_feed[label]))

            predictor[label] = Predictor(model=model, feed=predictor_feed[label], repr_function=repr_function)

        test_predictor_feed = DataFeed('test', test_datapoints, batchop=test_batchop, batch_size=128)
        test_predictor = Predictor(model=model, feed=test_predictor_feed, repr_function=test_repr_function)

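        # Multiplexed feeds interleave the per-label feeds so a single
        # trainer/predictor covers every class at once (exact scheduling is
        # up to MultiplexedDataFeed).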
        all_class_train_feed = MultiplexedDataFeed('atrain', train_feed.values(), batchop=batchop, batch_size=256)
        all_class_test_feed = MultiplexedDataFeed('atest', test_feed.values(), batchop=batchop, batch_size=256)
        all_class_predictor_feed = MultiplexedDataFeed('apredict', predictor_feed.values(), batchop=batchop, batch_size=256)
        
        all_class_trainer = Trainer(name='all_class_trainer',
                                    model=model,
                                    loss_function=partial(loss, scale=1),
                                    accuracy_function=accuracy,
                                    f1score_function=f1score_function,
                                    checkpoint=checkpoint, epochs=epochs,
                                    feeder=Feeder(all_class_train_feed, all_class_test_feed))
        
        all_class_predictor = Predictor(model=model, feed=all_class_predictor_feed, repr_function=repr_function)

        label_trainer_triples = sorted([(l, t, train_feed[l].size) for l, t in trainer.items()], key=lambda x: x[2])
        log.info('trainers built {}'.format(pformat(label_trainer_triples)))

        open('results/experiment_attn.csv', 'w').close()  # truncate the results file
        for e in range(eons):
            with open('results/experiment_attn.csv', 'a') as dump:
                dump.write('#========================after eon: {}\n'.format(e))
            log.info('on {}th eon'.format(e))

            
            if e:  # `e % 1` is always 0, so this runs on every eon after the first
                test_results = ListTable()
                test_dump = open('results/experiment_attn_over_test_{}.csv'.format(e), 'w')
                test_dump.write('|'.join(['id', 'toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']) + '\n')
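                # Header columns are the Jigsaw toxic-comment challenge labels.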
                log.info('running over test')

                for i in tqdm(range(test_predictor_feed.num_batch)):
                    log.debug('i: {}'.format(i))
                    output, results = test_predictor.predict(i)
                    test_results += results

                test_dump.write(repr(test_results))            
                test_dump.close()

            with open('results/experiment_attn.csv', 'a') as dump:
                output, results = all_class_predictor.predict(random.choice(range(all_class_predictor_feed.num_batch)))
                dump.write(repr(results))
                del output, results
                
            all_class_trainer.train()
            

            """
            for label, _, _ in reversed(label_trainer_triples):
                if not sum(label) and e and not e % 10:  #Avoid neutral classes in every epoch
                    continue
                
                label_desc = '-'.join([OUTPUT_VOCAB[l] for l in [i for i, x in enumerate(label) if x == 1]] )
                log.info('=================================== training for {} datapoints ========================================'.format(label_desc))

                with open('results/experiment_attn.csv', 'a') as dump:
                    output, results = predictor[label].predict(random.choice(range(predictor_feed[label].num_batch)))
                    dump.write(repr(results))
                    del output, results
                
                turns = int(max_size/train_feed[label].size/6) + 1
                log.info('========================  size: {} and turns: {}==========================================='.format(train_feed[label].size, turns))                
                for turn in range(turns):
                    log.info('==================================  label: {} and turn: {}/{}====================================='.format(label_desc, turn, turns))                
                    trainer[label].train()
            """
    except BaseException:  # also catches KeyboardInterrupt so the model is saved on Ctrl-C
        log.exception('####################')
        torch.save(model.state_dict(), 'attn_model.pth')

        return locals()
def experiment(VOCAB,
               LABELS,
               WORD2INDEX,
               LABEL2INDEX,
               raw_samples,
               loss_weight,
               datapoints=((), ()),  # immutable default; avoids the shared mutable-default pitfall
               eons=1000,
               epochs=10,
               checkpoint=5):
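    """Single-feed variant: trains on datapoints[0], validates on
    datapoints[1] with a class-weighted NLL loss, and dumps the full
    predictor output every eon; resumes from the SELF_NAME checkpoint
    when one exists."""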
    try:
        try:
            model = BiLSTMDecoderModel(Config(), len(VOCAB), len(LABELS))
            if Config().cuda: model = model.cuda()
            log.info('loading the old model image from {}'.format(
                '{}.{}'.format(SELF_NAME, 'pth')))
            model.load_state_dict(torch.load('{}.{}'.format(SELF_NAME, 'pth')))
            log.info('loaded the old image for the model')
        except Exception:
            log.exception('failed to load the model')
            model = BiLSTMDecoderModel(Config(), len(VOCAB), len(LABELS))
            if Config().cuda: model = model.cuda()
        print('**** the model', model)

        name = SELF_NAME
        _batchop = partial(batchop, LABEL2INDEX=LABEL2INDEX)
        train_feed = DataFeed(name,
                              datapoints[0],
                              batchop=_batchop,
                              vocab=WORD2INDEX,
                              batch_size=1024)
        test_feed = DataFeed(name,
                             datapoints[1],
                             batchop=_batchop,
                             vocab=WORD2INDEX,
                             batch_size=256)
        predictor_feed = DataFeed(name,
                                  datapoints[1],
                                  batchop=_batchop,
                                  vocab=WORD2INDEX,
                                  batch_size=128)

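        # Per-class weights for NLLLoss; typically set to upweight rare labels.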
        loss_weight = Variable(torch.Tensor(loss_weight))
        if Config().cuda: loss_weight = loss_weight.cuda()
        _loss = partial(loss, loss_function=nn.NLLLoss(loss_weight))
        trainer = Trainer(name=name,
                          model=model,
                          loss_function=_loss,
                          accuracy_function=accuracy,
                          checkpoint=checkpoint,
                          epochs=epochs,
                          feeder=Feeder(train_feed, test_feed))

        predictor = Predictor(model=model,
                              feed=predictor_feed,
                              repr_function=partial(repr_function,
                                                    VOCAB=VOCAB,
                                                    LABELS=LABELS,
                                                    raw_samples=raw_samples))

        for e in range(eons):
            with open('results/experiment_attn.csv', 'a') as dump:
                dump.write('#========================after eon: {}\n'.format(e))
            log.info('on {}th eon'.format(e))

            with open('results/experiment_attn.csv', 'a') as dump:
                results = ListTable()
                for ri in range(predictor_feed.num_batch):
                    output, _results = predictor.predict(ri)
                    results.extend(_results)
                dump.write(repr(results))
            if not trainer.train():
                raise Exception('trainer.train() reported failure')

    except BaseException:  # save the best model even on Ctrl-C
        log.exception('####################')
        trainer.save_best_model()

        return locals()
def experiment(eons=1000, epochs=1, checkpoint=1):
    try:
        model = BiLSTMDecoderModel(Config(), len(INPUT_VOCAB), len(CHAR_VOCAB),
                                   len(OUTPUT_VOCAB))
        if Config().cuda: model = model.cuda()

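        # Two parallel pipelines: "classified" datapoints (presumably those
        # with at least one positive label) and "non_classified" ones; only
        # the classified trainer runs in the loop below.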
        classified_train_feed = DataFeed(classified_datapoints[0],
                                         batchop=batchop,
                                         batch_size=128)
        classified_test_feed = DataFeed(classified_datapoints[1],
                                        batchop=batchop,
                                        batch_size=128)

        classified_trainer = Trainer(model=model,
                                     loss_function=loss,
                                     accuracy_function=accuracy,
                                     checkpoint=checkpoint,
                                     epochs=epochs,
                                     feeder=Feeder(classified_train_feed,
                                                   classified_test_feed))

        classified_predictor_feed = DataFeed(classified_datapoints[1],
                                             batchop=batchop,
                                             batch_size=12)
        classified_predictor = Predictor(model=model,
                                         feed=classified_predictor_feed,
                                         repr_function=repr_function)

        non_classified_train_feed = DataFeed(non_classified_datapoints[0],
                                             batchop=batchop,
                                             batch_size=128)
        non_classified_test_feed = DataFeed(non_classified_datapoints[1],
                                            batchop=batchop,
                                            batch_size=128)
        non_classified_trainer = Trainer(model=model,
                                         loss_function=loss,
                                         accuracy_function=accuracy,
                                         checkpoint=checkpoint,
                                         epochs=epochs,
                                         feeder=Feeder(
                                             non_classified_train_feed,
                                             non_classified_test_feed))

        non_classified_predictor_feed = DataFeed(non_classified_datapoints[1],
                                                 batchop=batchop,
                                                 batch_size=12)
        non_classified_predictor = Predictor(
            model=model,
            feed=non_classified_predictor_feed,
            repr_function=repr_function)

        test_predictor_feed = DataFeed(test_datapoints,
                                       batchop=test_batchop,
                                       batch_size=128)
        test_predictor = Predictor(model=model,
                                   feed=test_predictor_feed,
                                   repr_function=test_repr_function)

        dump = open('results/experiment_attn.csv', 'w')
        for e in range(eons):
            dump.write('#========================after eon: {}\n'.format(e))
            log.info('on {}th eon'.format(e))
            output, results = classified_predictor.predict(
                random.choice(range(classified_predictor_feed.num_batch)))
            dump.write(repr(results))
            del output, results
            output, results = non_classified_predictor.predict(
                random.choice(range(non_classified_predictor_feed.num_batch)))
            dump.write(repr(results))
            del output, results

            #non_classified_trainer.train()
            #for i in range(int(non_classified_train_feed.size/classified_train_feed.size)):
            classified_trainer.train()

            if e and not e % 10:
                test_results = ListTable()
                test_dump = open(
                    'results/experiment_attn_over_test_{}.csv'.format(e), 'w')
                test_dump.write('|'.join([
                    'id', 'toxic', 'severe_toxic', 'obscene', 'threat',
                    'insult', 'identity_hate'
                ]) + '\n')
                log.info('running over test')

                for i in tqdm(range(test_predictor_feed.num_batch)):
                    log.debug('i: {}'.format(i))
                    output, results = test_predictor.predict(i)
                    test_results += results

                test_dump.write(repr(test_results))
                test_dump.close()

        dump.close()

    except KeyboardInterrupt:
        torch.save(model.state_dict(), 'attn_model.pth')
        dump.close()
        return locals()