Example #1
def get_media(data):
    """Return the list of media URLs from a tweet's entities dict."""
    if not data or not data.get("media"):
        return None
    medias = []
    for m in data["media"]:
        if m.get("media_url"):
            medias.append(m.get("media_url"))
    return medias
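
A minimal call to the extractor above, assuming a Twitter-style entities payload (the sample dict is hypothetical):

entities = {
    "media": [
        {"media_url": "http://pbs.example.com/a.jpg"},
        {"media_url": "http://pbs.example.com/b.jpg"},
        {"type": "video"},  # no media_url key, so it is skipped
    ]
}
print(get_media(entities))  # ['http://pbs.example.com/a.jpg', 'http://pbs.example.com/b.jpg']
print(get_media({}))        # None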
Example #2
def run(sess, f, data):
    # load graph
    _, output_size = data.io_shape
    inputs, _, layer_activations, feed_dicts = m.get(f.model).load_model(sess, f.model_meta, f.model_checkpoint, output_size)

    with sess.as_default():
        layerwise_stats = [] # in order, from bottom to topmost activation
        for layer_activation, size in layer_activations:
            print('computing stats for {}'.format(layer_activation))
            stats = compute_layerwise_statistics(sess, layer_activation, size, inputs, data, feed_dicts, f.loss)
            layerwise_stats.append(stats)
        all_layers_stats = None
        if f.compute_graphwise_stats:
            print('computing stats for entire network')
            all_layers_stats = compute_graphwise_statistics(sess, layer_activations, inputs, data, feed_dicts)

        all_stats = layerwise_stats, all_layers_stats

    stats_dir = os.path.join(f.summary_folder, f.run_name, 'stats')
    u.ensure_dir_exists(stats_dir)
    stats_file = os.path.join(stats_dir, 'activation_stats_{}.npy'.format(f.run_name))
    np.save(stats_file, all_stats)
    print('stats saved in {}'.format(stats_file))
Example #3
def show(doc):
    password = models.get('password:%s' % doc)
    if password:
        return template('pass', doc=doc)
    user = handler.get_rand()
    now = time.time()
    return template('test', user=user, timestamp=now, doc=doc, password='')
Example #4
def show(doc):
    password = models.get("password:%s" % doc)
    if password:
        return template("pass", doc=doc)
    user = handler.get_rand()
    now = time.time()
    return template("test", user=user, timestamp=now, doc=doc, password="")
Example #5
def main():
    # Add seed
    random_seed = 42
    torch.manual_seed(random_seed)
    args = parser.get()
    X_train = load('./datas/X_train.npy')
    y_train = load('./datas/y_train.npy')
    X_test = load('./datas/X_test.npy')
    train_dataset = data.DatasetXy(X_train, y_train)
    test_dataset = data.DatasetX(X_test)
    data_class = data.Dataloader(args, train_dataset, test_dataset)

    train, test = data_class.train(), data_class.test()

    model = models.get(args)
    optimizer = optimizers.get(args, model.parameters())
    criterion = torch.nn.CrossEntropyLoss()

    for epoch in range(args.epochs):
        train_metrics = runner.run(
            model,
            criterion,
            optimizer,
            train,
            True,
            {
                "loss": metrics.loss,
                "accuracy": metrics.accuracy
            },
        )
        metrics.print_metrics(train_metrics)

    y_test_pred = runner.run(
        model,
        criterion,
        optimizer,
        test,
        False,
        {
            "loss": metrics.loss,
            "accuracy": metrics.accuracy
        },
    )

    print(y_test_pred)
    # flatten per-batch prediction lists into a single flat list
    y_test_pred = [item for sublist in y_test_pred for item in sublist]
    y_test = np.asarray(y_test_pred)
    pd.DataFrame({
        "Id": np.arange(len(y_test)),
        "Category": y_test
    }).astype(int).to_csv("solution.csv", index=False)
Example #6
 def post(self, id):
     with session_scope():
         meetme = meetme_dao.get(id)
         form = MeetmeForm(obj=meetme)
         if form.validate_on_submit():
             form.populate_obj(meetme)
             meetme_dao.edit(meetme)
     return redirect(url_for('meetme.Meetme:index'))
Example #7
 def testGetModel(self):
     model = models.get('genomics_cnn',
                        batch_size=8,
                        len_seqs=250,
                        num_motifs=16,
                        len_motifs=20,
                        num_denses=32)
     self.assertEqual(10, len(model.layers))
Example #8
def share(doc):
    response.content_type = 'text/javascript; charset=utf-8'
    text = models.get('doc:%s' % doc)
    if not text:
        text = ''
    else:
        text = text[1]
    return render.get_js(text)
Example #10
def share(doc):
    response.content_type = "text/javascript; charset=utf-8"
    text = models.get("doc:%s" % doc)
    if not text:
        text = ""
    else:
        text = text[1]
    return render.get_js(text)
Example #11
    def get(self, unused_request):
        """ Get current user marketplace.
        """

        marketplaceModel = models.get(user.get_current_user().email())

        return messages.MarketplaceGetMessage(
            name=marketplaceModel.name,
            created_date=marketplaceModel.created_date)
Example #13
def first():
    doc = request.forms.get("doc")
    text = models.get("doc:%s" % doc)
    now = time.time()
    if text:
        now, text = text
    else:
        text = ""
    return json.dumps({"txt": text, "timestamp": now})
Example #14
def pass_show(passw, doc):
    password = models.get("password:%s" % doc)
    if not password:
        password = ""
    if password != passw:
        return "error"
    user = handler.get_rand()
    now = time.time()
    return template("test", user=user, timestamp=now, doc=doc, password=passw)
Example #15
def get_patch(doc, parent, user):
    timestamp = models.get('last:%s' % doc)
    if not timestamp or timestamp <= parent:
        return False
    diff = models.get(doc)
    if not diff:
        diff = []
    ret = []
    version = 0
    for i in reversed(diff):
        if i['timestamp'] > timestamp:
            timestamp = i['timestamp']
        if i['timestamp'] == parent:
            break
        if i['user'] == user and i['version'] > version:
            version = i['version']
        ret = i['diff'] + ret
    return (timestamp, version, ret)
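
The revision entries that get_patch walks are not shown on this page; judging from the fields read here (and written by update() in Example #37), each element stored under the doc key is presumably a dict shaped like this (values illustrative):

entry = {
    'parent': 1700000000.0,     # timestamp the diff was made against
    'timestamp': 1700000012.3,  # when the revision was committed
    'user': 'a1b2c3',           # id produced by handler.get_rand()
    'version': 2,               # per-user sequence number
    'diff': [],                 # opaque diff ops consumed by utils.forward
}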
Example #16
def pass_show(passw, doc):
    password = models.get('password:%s' % doc)
    if not password:
        password = ''
    if password != passw:
        return 'error'
    user = handler.get_rand()
    now = time.time()
    return template('test', user=user, timestamp=now, doc=doc, password=passw)
Example #17
File: tq.py Project: adajw/Laura
	def post(self):
		'''Get a specific fingerprint by key, and insert it.'''
		# get blip key
		key = self.request.get('key')
		f = models.get(key)
		models.insert(f, False)
		for i in f.fingerprint_set:
			# process children
			doitlater.insert_blip(i)
Example #18
def first():
    doc = request.forms.get('doc')
    text = models.get('doc:%s' % doc)
    now = time.time()
    if text:
        now, text = text
    else:
        text = ''
    return json.dumps({'txt': text, 'timestamp': now})
Example #19
def get_patch(doc, parent, user):
    timestamp = models.get('last:%s' % doc)
    if not timestamp or timestamp <= parent:
        return False
    diff = models.get(doc)
    if not diff:
        diff = []
    ret = []
    version = 0
    for i in reversed(diff):
        if i['timestamp'] > timestamp:
            timestamp = i['timestamp']
        if i['timestamp'] == parent:
            break
        if i['user'] == user and i['version'] > version:
            version = i['version']
        ret = i['diff']+ret
    return (timestamp, version, ret)
Example #20
    def __init__(self,
                 features: Features = Features(),
                 device=torch.device('cpu')):
        self.features = features
        self.device = device

        self.model = models.get(
            age=features.age,
            gender=features.gender,
            pretrained='models/integrated.pt',
        ).to(self.device)
Example #21
def validate_and_init_rennet_model(model_fp):
    try:
        with hf(model_fp, 'r') as f:
            minver = f['rennet'].attrs['version_min']
            srcver = f['rennet'].attrs['version_src']
            modelname = f['rennet/model'].attrs['name']

        mu.validate_rennet_version(minver, srcver)
        return m.get(modelname)(model_fp)
    except KeyError:
        raise RuntimeError("Invalid model file: {}".format(model_fp))
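
The validator above only reads two version attributes and a model name from the HDF5 file (hf is presumably an alias of h5py.File). A file satisfying that layout could be produced like this; the attribute values are illustrative:

import h5py

with h5py.File('model.h5', 'a') as f:
    rennet = f.require_group('rennet')
    rennet.attrs['version_min'] = '0.1.0'
    rennet.attrs['version_src'] = '0.2.0'
    rennet.require_group('model').attrs['name'] = 'some-model'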
Example #22
	def get(self, unused_request):
		"""Return the user's store (marketplace).
		"""

		logging.debug("Running endpoint to fetch the user's marketplace (store)")

		# fetch the user's marketplaces (stores)
		marketplaceModel = models.get(user.get_current_user().email())

		# return the user's marketplace (store)
		return messages.MarketplaceGetMessage(name=marketplaceModel.name, created_date=marketplaceModel.created_date)
Example #23
def change_pass():
    password = request.forms.get('password')
    newpass = request.forms.get('newpassword')
    if not newpass:
        newpass = password
    doc = request.forms.get('doc')
    old = models.get('password:%s' % doc)
    if not old:
        old = ''
    if old != password:
        return 'error'
    models.set('password:%s' % doc, newpass)
    bottle.redirect('/pass/%s/%s/' % (newpass, doc))
Example #24
def send():
    diff = json.loads(request.forms.get('diff'))
    parent = float(request.forms.get('parent'))
    doc = request.forms.get('doc')
    version = int(request.forms.get('version'))
    user = request.forms.get('user')
    password = request.forms.get('password')
    old = models.get('password:%s' % doc)
    if not old:
        old = ''
    if not valid_doc(doc) or old != password:
        return 'error'
    handler.update(doc, parent, user, version, diff)
Example #25
def change_pass():
    password = request.forms.get("password")
    newpass = request.forms.get("newpassword")
    if not newpass:
        newpass = password
    doc = request.forms.get("doc")
    old = models.get("password:%s" % doc)
    if not old:
        old = ""
    if old != password:
        return "error"
    models.set("password:%s" % doc, newpass)
    bottle.redirect("/pass/%s/%s/" % (newpass, doc))
Example #26
    def GET(self, postid):
        try:
            # increase pageview if necessary
            if is_new_pageview(postid):
                models.inc_pageviews(postid)

            p = models.get(postid)
            if p:
                return render.view(p, admin())
            else:
                raise web.notfound()
        except Exception:
            raise web.notfound()
Example #27
def send():
    diff = json.loads(request.forms.get("diff"))
    parent = float(request.forms.get("parent"))
    doc = request.forms.get("doc")
    version = int(request.forms.get("version"))
    user = request.forms.get("user")
    password = request.forms.get("password")
    old = models.get("password:%s" % doc)
    if not old:
        old = ""
    if not valid_doc(doc) or old != password:
        return "error"
    handler.update(doc, parent, user, version, diff)
Example #28
    def get(self, unused_request):
        """Return the user's store (marketplace).
        """

        logging.debug(
            "Running endpoint to fetch the user's marketplace (store)")

        # fetch the user's marketplaces (stores)
        marketplaceModel = models.get(user.get_current_user().email())

        # return the user's marketplace (store)
        return messages.MarketplaceGetMessage(
            name=marketplaceModel.name,
            created_date=marketplaceModel.created_date)
Example #29
    def get(self, request):
        """ Get customer by id.
        """

        # Get customer by id
        customerModel = models.get(request.id)

        # Return
        return CustomerGetMessage(id=customerModel.key.id(),
                                  name=customerModel.name,
                                  email=customerModel.email,
                                  phone=customerModel.phone,
                                  location=customerModel.location,
                                  created_date=customerModel.created_date)
Example #30
def index(request, param=""):
    with open('./ws_django/spc/fractal_canvas/index.html', 'r') as fh:
        html = fh.read()

    if param == "":
        html = html.replace('__jscode__', jsCode)
    else:
        b64JS = models.get(param)
        js = base64.b64decode(b64JS)
        html = html.replace('__jscode__', js)
    return HttpResponse(html)
Example #31
    def POST(self, postid):
        try:
            i = web.input(do_recommend=False)
            do_recommend = bool(i.do_recommend)

            web.header('Content-Type', 'application/json')

            if do_recommend and is_new_star(postid):
                models.inc_starred(postid)
                new_starred = models.get(postid).get('starred', 0)
                return json.dumps({'status': 'ok', 'starred': new_starred})
            else:
                return json.dumps({'status': 'repeated'})
        except Exception:
            return json.dumps({'status': 'failed'})
Example #32
    def get(self, request):
        """ Get customer by id.
        """

        # Get customer by id
        customerModel = models.get(request.id)

        # Return
        return CustomerGetMessage(
            id=customerModel.key.id(),
            name=customerModel.name,
            email=customerModel.email,
            phone=customerModel.phone,
            location=customerModel.location,
            created_date=customerModel.created_date)
Example #33
def index(request, param=""):
    with open('./ws_django/spc/fractal_canvas/index.html', 'r') as fh:
        html = fh.read()

    if param == "":
        html = html.replace('__jscode__', jsCode)
    else:
        b64JS = models.get(param)
        js = base64.b64decode(b64JS)
        html = html.replace('__jscode__', js)
    return HttpResponse(html)
Example #34
def log(obj):
    o = models.get(obj)
    if not o:
        return {}
    if models.is_file(obj):
        query = {'_file': o['_id']}
    else:
        query = {'_work': o['_id']}
    revisions = []
    for rev in models.history.find(query).sort('date'):
        rev = models._cleanup(rev)
        revisions.append(rev)
    return {
        'revisions': revisions
    }
Example #35
def main():
    args = parser.get()

    data_class = data.Dataset(args)
    train, validation = data_class.train(), data_class.validation()

    model = models.get(args)
    optimizer = optimizers.get(args, model.parameters())
    criterion = torch.nn.CrossEntropyLoss()

    for epoch in range(args.epochs):
        train_metrics = runner.run(
            model,
            criterion,
            optimizer,
            train,
            True,
            {
                "loss": metrics.loss,
                "accuracy": metrics.accuracy
            },
        )
        metrics.print_metrics(train_metrics)
        validation_metrics = runner.run(
            model,
            criterion,
            optimizer,
            validation,
            False,
            {
                "loss": metrics.loss,
                "accuracy": metrics.accuracy
            },
        )
        metrics.print_metrics(validation_metrics)
Example #36
	def GET(self):
		galleryItem = models.get('items')
		return render.index(galleryItem)
Example #37
def update(doc, parent, user, version, diff):
    if not diff:
        return
    models.lock_acquire(doc)
    now = time.time()
    patch = models.get(doc)
    if not patch:
        patch = []
    pre = []
    version_count = models.get('version:%s:%s' % (user, doc))
    if not version_count:
        version_count = 0
    version_max = models.get('versionmax:%s:%s' % (user, doc))
    if not version_max:
        version_max = 0
    version_time = models.get('versiontime:%s:%s' % (user, doc))
    if not version_time:
        version_time = 0
    same = []
    if parent != version_time:
        models.set('version:%s:%s' % (user, doc), 1, now + 60)
        models.set('versionmax:%s:%s' % (user, doc), version, now + 60)
        models.set('versiontime:%s:%s' % (user, doc), parent, now + 60)
        if version == 1:
            same = [(version, diff)]
        else:
            models.set('versions:%s:%s' % (user, doc), [(version, diff)],
                       now + 60)
    else:
        same = models.get('versions:%s:%s' % (user, doc))
        if not same:
            same = []
        version_count += 1
        models.set('version:%s:%s' % (user, doc), version_count, now + 60)
        if version > version_max:
            version_max = version
        models.set('versionmax:%s:%s' % (user, doc), version_max, now + 60)
        if version_count == version_max:
            same.append((version, diff))
            models.delete('versions:%s:%s' % (user, doc))
        else:
            models.set('versions:%s:%s' % (user, doc),
                       same + [(version, diff)], now + 60)
            same = []
    if not same:
        models.lock_release(doc)
        return
    same = sorted(same)
    version = same[0][0]
    for i in reversed(patch):
        if i['timestamp'] == parent or (i['user'] == user
                                        and i['version'] + 1 == version):
            break
        pre = i['diff'] + pre
    diff = []
    for i in same:
        diff += utils.forward(pre, i[1])
    version = same[-1][0]
    ret = {
        'parent': parent,
        'timestamp': now,
        'user': user,
        'version': version,
        'diff': diff
    }
    # keep only revisions from the last minute (a list comprehension also works
    # on Python 3, where filter() returns an iterator that cannot be concatenated)
    models.set(doc,
               [x for x in patch if x['timestamp'] >= now - 60] + [ret])
    models.set('last:%s' % doc, now)
    text = models.get('doc:%s' % doc)
    if text:
        text = text[1]
    else:
        text = ''
    text = utils.text_patch(text, diff)
    models.set('doc:%s' % doc, (now, text))
    models.lock_release(doc)
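
update() and its sibling handlers treat models as a small key-value store with optional expiry plus a per-doc lock; that module is not shown on this page. A minimal single-process stand-in consistent with the calls above might look like this (purely illustrative, not the project's implementation):

import threading
import time

_store = {}  # key -> (value, expiry timestamp or None)
_locks = {}  # doc -> threading.Lock

def get(key):
    value, expiry = _store.get(key, (None, None))
    if expiry is not None and expiry < time.time():
        _store.pop(key, None)  # expired entries read as missing
        return None
    return value

def set(key, value, expiry=None):
    _store[key] = (value, expiry)

def delete(key):
    _store.pop(key, None)

def lock_acquire(doc):
    _locks.setdefault(doc, threading.Lock()).acquire()

def lock_release(doc):
    _locks[doc].release()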
Example #38
 def get(self, id):
     with session_scope():
         meetme = meetme_dao.get(id)
         form = MeetmeForm(obj=meetme)
         return render_template('meetme_form.html', form=form)
Example #40
def run(sess, f, data):
    # load data that will be used for evaluating the distillation process
    eval_data = d.get(f.eval_dataset, f)

    # load teacher graph
    _, output_size = data.io_shape
    inputs, teacher_outputs, _, teacher_feed_dicts = m.get(f.model).load_model(sess, f.model_meta, f.model_checkpoint, output_size)
    teacher_outputs = tf.stop_gradient(tf.nn.softmax(teacher_outputs))

    # create student graph
    outputs, _, feed_dicts = m.get(f.model).create_model(inputs, output_size)

    loss, train_step = create_train_ops(outputs, teacher_outputs, lr=f.lr, loss=f.loss)
    accuracy = create_eval_ops(outputs, teacher_outputs)
    summary_op = create_summary_ops(loss, accuracy)

    # only initialize non-initialized vars:
    u.init_uninitted_vars(sess)
    # (this is very important in distill: we don't want to reinit teacher model)

    saver = tf.train.Saver(tf.global_variables())

    summary_dir = os.path.join(f.summary_folder, f.run_name, 'distill')
    train_writer = tf.summary.FileWriter(os.path.join(summary_dir, 'train'), sess.graph)
    trainbatch_writer = tf.summary.FileWriter(os.path.join(summary_dir, 'train_batch'), sess.graph)
    test_writer = tf.summary.FileWriter(os.path.join(summary_dir, 'test'), sess.graph)

    with sess.as_default():
        global_step = 0

        print('Note: accuracies here are how much the student correlates to the teacher.')
        print('For true set accuracy, multiply by teacher\'s accuracy.')

        for i in range(f.epochs):
            print('Epoch: {}'.format(i))
            for batch_x, _ in data.train_epoch_in_batches(f.train_batch_size):
                # train step. we don't need to feed batch_y because the student
                # is being trained to mimic the teacher's temperature-scaled
                # activations.
                summary, _ = sess.run([summary_op, train_step],
                        feed_dict={**teacher_feed_dicts['distill'],
                                   **feed_dicts['distill'],
                                   inputs: batch_x})
                trainbatch_writer.add_summary(summary, global_step)

                if global_step % f.eval_interval == 0:
                    # eval test
                    summaries = []
                    for test_batch_x, test_batch_y in eval_data.test_epoch_in_batches(f.test_batch_size):
                        summary = sess.run(summary_op,
                                feed_dict={**teacher_feed_dicts['distill'],
                                           **feed_dicts['distill'],
                                           inputs: test_batch_x})
                        summaries.append(summary)
                    test_writer.add_summary(merge_summary_list(summaries, True), global_step)

                    # eval train
                    summaries = []
                    for train_batch_x, train_batch_y in data.train_epoch_in_batches(f.train_batch_size):
                        summary = sess.run(summary_op,
                                feed_dict={**teacher_feed_dicts['distill'],
                                           **feed_dicts['distill'],
                                           inputs: train_batch_x})
                        summaries.append(summary)
                    train_writer.add_summary(merge_summary_list(summaries, True), global_step)

                global_step += 1

                if global_step % f.checkpoint_interval == 0:
                    checkpoint_dir = os.path.join(summary_dir, 'checkpoint/')
                    ensure_dir_exists(checkpoint_dir)
                    checkpoint_file = os.path.join(checkpoint_dir, f.model)
                    saver.save(sess, checkpoint_file, global_step=global_step)
                    print('distilled model saved in {}'.format(checkpoint_file))

    print('distilled model saved in {}'.format(checkpoint_file))
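
u.init_uninitted_vars matters in this distillation flow because the teacher's restored weights must survive while the freshly built student variables still need initialization. Its source is not on this page; the usual TF1 idiom it presumably wraps is:

import tensorflow as tf

def init_uninitted_vars(sess):
    # Initialize only variables that do not yet hold a value in this session.
    all_vars = tf.global_variables()
    flags = sess.run([tf.is_variable_initialized(v) for v in all_vars])
    sess.run(tf.variables_initializer(
        [v for v, initialized in zip(all_vars, flags) if not initialized]))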
Example #41
def main():

    logger.info('Start to declare training variables')
    cfg.device = device = 'cuda' if torch.cuda.is_available() else 'cpu'
    best_acc = 0.  # best test accuracy
    start_epoch = 0  # start from epoch 0 or last checkpoint epoch
    start_round = 0  # start for iter 0 or last checkpoint iter

    logger.info('Start to prepare data')
    trainset, trainloader, testset, testloader = datasets.get(cfg.dataset,
                                                              instant=True)
    # cheat labels are used to compute neighbourhoods consistency only
    cheat_labels = torch.tensor(trainset.labels).long().to(device)
    ntrain, ntest = len(trainset), len(testset)
    logger.info('Totally got %d training and %d test samples' %
                (ntrain, ntest))

    logger.info('Start to build model')
    net = models.get(cfg.network, instant=True)
    npc = NonParametricClassifier(cfg.low_dim, ntrain, cfg.npc_temperature,
                                  cfg.npc_momentum)
    ANs_discovery = ANsDiscovery(ntrain)
    criterion = Criterion()
    optimizer = optimizers.get(cfg.optimizer,
                               instant=True,
                               params=net.parameters())
    lr_handler = lr_policy.get(cfg.lr_policy, instant=True)
    protocol = protocols.get(cfg.protocol)

    # data parallel
    if device == 'cuda':
        if (cfg.network.lower().startswith('alexnet')
                or cfg.network.lower().startswith('vgg')):
            net.features = torch.nn.DataParallel(net.features,
                                                 device_ids=range(
                                                     len(cfg.gpus.split(','))))
        else:
            net = torch.nn.DataParallel(net,
                                        device_ids=range(
                                            len(cfg.gpus.split(','))))
        cudnn.benchmark = True

    net, npc, ANs_discovery, criterion = (net.to(device), npc.to(device),
                                          ANs_discovery.to(device),
                                          criterion.to(device))

    # load ckpt file if necessary
    if cfg.resume:
        assert os.path.exists(
            cfg.resume), "Resume file not found: %s" % cfg.resume
        logger.info('Start to resume from %s' % cfg.resume)
        ckpt = torch.load(cfg.resume)
        net.load_state_dict(ckpt['net'])
        optimizer.load_state_dict(ckpt['optimizer'])
        npc.load_state_dict(ckpt['npc'])  # mutates in place; assigning the return value would clobber npc
        ANs_discovery.load_state_dict(ckpt['ANs_discovery'])
        best_acc = ckpt['acc']
        start_epoch = ckpt['epoch']
        start_round = ckpt['round']

    # test if necessary
    if cfg.test_only:
        logger.info('Testing at beginning...')
        acc = protocol(net, npc, trainloader, testloader, 200,
                       cfg.npc_temperature, True, device)
        logger.info('Evaluation accuracy at %d round and %d epoch: %.2f%%' %
                    (start_round, start_epoch, acc * 100))
        sys.exit(0)

    logger.info('Start the progressive training process from round: %d, '
                'epoch: %d, best acc is %.4f...' %
                (start_round, start_epoch, best_acc))
    round = start_round
    global_writer = SummaryWriter(cfg.debug,
                                  log_dir=os.path.join(cfg.tfb_dir, 'global'))
    while (round < cfg.max_round):

        # variables are initialized to different value in the first round
        is_first_round = (round == start_round)
        best_acc = best_acc if is_first_round else 0

        if not is_first_round:
            logger.info('Start to mining ANs at %d round' % round)
            ANs_discovery.update(round, npc, cheat_labels)
            logger.info('ANs consistency at %d round is %.2f%%' %
                        (round, ANs_discovery.consistency * 100))

        ANs_num = ANs_discovery.anchor_indexes.shape[0]
        global_writer.add_scalar('ANs/Number', ANs_num, round)
        global_writer.add_scalar('ANs/Consistency', ANs_discovery.consistency,
                                 round)

        # declare local writer
        writer = SummaryWriter(cfg.debug,
                               log_dir=os.path.join(
                                   cfg.tfb_dir,
                                   '%04d-%05d' % (round, ANs_num)))
        logger.info('Start training at %d/%d round' % (round, cfg.max_round))

        # start to train for an epoch
        epoch = start_epoch if is_first_round else 0
        lr = cfg.base_lr
        while lr > 0 and epoch < cfg.max_epoch:

            # get learning rate according to current epoch
            lr = lr_handler.update(epoch)

            train(round, epoch, net, trainloader, optimizer, npc, criterion,
                  ANs_discovery, lr, writer)

            logger.info('Start to evaluate...')
            acc = protocol(net, npc, trainloader, testloader, 200,
                           cfg.npc_temperature, False, device)
            writer.add_scalar('Evaluate/Rank-1', acc, epoch)

            logger.info(
                'Evaluation accuracy at %d round and %d epoch: %.1f%%' %
                (round, epoch, acc * 100))
            logger.info('Best accuracy at %d round and %d epoch: %.1f%%' %
                        (round, epoch, best_acc * 100))

            is_best = acc >= best_acc
            best_acc = max(acc, best_acc)
            if is_best and not cfg.debug:
                target = os.path.join(cfg.ckpt_dir,
                                      '%04d-%05d.ckpt' % (round, ANs_num))
                logger.info('Saving checkpoint to %s' % target)
                state = {
                    'net': net.state_dict(),
                    'optimizer': optimizer.state_dict(),
                    'ANs_discovery': ANs_discovery.state_dict(),
                    'npc': npc.state_dict(),
                    'acc': acc,
                    'epoch': epoch + 1,
                    'round': round,
                    'session': cfg.session
                }
                torch.save(state, target)
            epoch += 1

        # log best accuracy after each iteration
        global_writer.add_scalar('Evaluate/best_acc', best_acc, round)
        round += 1
Example #42
def train_model(out_dir_train,
                train_file,
                test_file,
                data_transforms,
                batch_size=None,
                batch_size_val=None,
                num_epochs=100,
                save_after=20,
                disp_after=1,
                plot_after=10,
                test_after=1,
                lr=0.0001,
                dec_after=100,
                model_name='alexnet'):

    util.mkdir(out_dir_train)
    log_file = os.path.join(out_dir_train, 'log.txt')
    plot_file = os.path.join(out_dir_train, 'loss.jpg')
    log_arr = []
    plot_arr = [[], []]
    plot_val_arr = [[], []]

    train_data = dataset.Horse_Image_Dataset(train_file,
                                             data_transforms['train'])
    test_data = dataset.Horse_Image_Dataset(test_file, data_transforms['val'])

    if batch_size is None:
        batch_size = len(train_data)

    if batch_size_val is None:
        batch_size_val = len(test_data)

    train_dataloader = torch.utils.data.DataLoader(train_data,
                                                   batch_size=batch_size,
                                                   shuffle=True,
                                                   num_workers=0)

    test_dataloader = torch.utils.data.DataLoader(test_data,
                                                  batch_size=batch_size_val,
                                                  shuffle=False,
                                                  num_workers=0)

    class_weights = get_class_weights(util.readLinesFromFile(train_file))

    torch.cuda.device(0)
    iter_begin = 0

    network = models.get('caps_alexnet_simple')

    model = network.model.cuda()
    # model.train(True)
    # criterion = nn.CrossEntropyLoss(weight = torch.FloatTensor(class_weights).cuda())

    optimizer = optim.Adam(network.get_lr_list(lr), lr=0)

    if dec_after is not None:
        exp_lr_scheduler = lr_scheduler.StepLR(optimizer,
                                               step_size=dec_after,
                                               gamma=0.1)

    for num_epoch in range(num_epochs):

        for num_iter_train, batch in enumerate(train_dataloader):

            # print batch['image'].shape,torch.min(batch['image']),torch.max(batch['image'])
            # im = np.transpose(batch['image'][0].numpy(),(1,2,0))
            # im = batch['image'][0].numpy()

            # print im.shape
            # scipy.misc.imsave('../scratch/check.jpg',im)
            # raw_input()
            data = Variable(batch['image'].cuda())
            one_hot = models.utils.one_hot_encode(batch['label'], 2)
            loss_weights = torch.FloatTensor(
                np.tile(
                    np.array(class_weights)[np.newaxis, :],
                    (one_hot.shape[0], 1)))
            one_hot = torch.mul(one_hot, loss_weights)
            labels = Variable(one_hot).cuda()

            # labels = Variable(models.utils.one_hot_encode(batch['label'],2)).cuda()
            output = model(data)  # output from DigitCaps (out_digit_caps)
            loss = model.loss(data, output,
                              labels)  # pass in data for image reconstruction
            optimizer.zero_grad()  # clear gradients accumulated from the previous step
            loss.backward()
            loss_iter = loss.data[0]
            optimizer.step()

            num_iter = num_epoch * len(train_dataloader) + num_iter_train
            # num_iter +=1
            plot_arr[0].append(num_iter)
            plot_arr[1].append(loss_iter)

            str_display = 'lr: %.6f, iter: %d, loss: %.4f' % (
                optimizer.param_groups[-1]['lr'], num_iter, loss_iter)
            log_arr.append(str_display)
            print str_display

            if num_iter % plot_after == 0 and num_iter > 0:
                util.writeFile(log_file, log_arr)
                if len(plot_val_arr[0]) == 0:
                    visualize.plotSimple([(plot_arr[0], plot_arr[1])],
                                         out_file=plot_file,
                                         title='Loss',
                                         xlabel='Iteration',
                                         ylabel='Loss',
                                         legend_entries=['Train'])
                else:
                    visualize.plotSimple([(plot_arr[0], plot_arr[1]),
                                          (plot_val_arr[0], plot_val_arr[1])],
                                         out_file=plot_file,
                                         title='Loss',
                                         xlabel='Iteration',
                                         ylabel='Loss',
                                         legend_entries=['Train', 'Val'])

        if num_epoch % test_after == 0:
            model.eval()
            for num_iter_test, batch in enumerate(test_dataloader):
                # data = Variable(batch['image'].cuda())
                # labels = Variable(torch.LongTensor(batch['label']).cuda())
                # loss = criterion(model(data), labels)
                # loss_iter = loss.data[0]

                data = Variable(batch['image'].cuda())
                # labels = Variable(models.utils.one_hot_encode(batch['label'],2)).cuda()

                one_hot = models.utils.one_hot_encode(batch['label'], 2)
                loss_weights = torch.FloatTensor(
                    np.tile(
                        np.array(class_weights)[np.newaxis, :],
                        (one_hot.shape[0], 1)))
                one_hot = torch.mul(one_hot, loss_weights)
                labels = Variable(one_hot).cuda()

                output = model(data)  # output from DigitCaps (out_digit_caps)
                loss = model.loss(
                    data, output,
                    labels)  # pass in data for image reconstruction
                loss_iter = loss.data[0]

                # test_epoch = num_epoch/test_after
                num_iter = num_epoch * len(train_dataloader) + num_iter_test
                # +=1
                #
                plot_val_arr[0].append(num_iter)
                plot_val_arr[1].append(loss_iter)

                str_display = 'lr: %.6f, val iter: %d, val loss: %.4f' % (
                    optimizer.param_groups[-1]['lr'], num_iter, loss_iter)
                log_arr.append(str_display)
                print str_display
            model.train(True)

        if num_epoch % save_after == 0:
            out_file = os.path.join(out_dir_train,
                                    'model_' + str(num_epoch) + '.pt')
            print 'saving', out_file
            torch.save(model, out_file)

        if dec_after is not None:
            exp_lr_scheduler.step()

    out_file = os.path.join(out_dir_train, 'model_' + str(num_epoch) + '.pt')
    print 'saving', out_file
    torch.save(model, out_file)

    print plot_arr[0]

    util.writeFile(log_file, log_arr)
    if len(plot_val_arr[0]) == 0:
        visualize.plotSimple([(plot_arr[0], plot_arr[1])],
                             out_file=plot_file,
                             title='Loss',
                             xlabel='Iteration',
                             ylabel='Loss',
                             legend_entries=['Train'])
    else:
        visualize.plotSimple([(plot_arr[0], plot_arr[1]),
                              (plot_val_arr[0], plot_val_arr[1])],
                             out_file=plot_file,
                             title='Loss',
                             xlabel='Iteration',
                             ylabel='Loss',
                             legend_entries=['Train', 'Val'])
Example #43
	def GET(self):
		galleryItems = models.get('items')
		return render.gallery(galleryItems)
Example #44
def test_model(out_dir_train,
                model_num,
                train_data,
                test_data,
                model_name = 'alexnet',
                batch_size_val =None,
                criterion = nn.CrossEntropyLoss()):

    out_dir_results = os.path.join(out_dir_train,'results_model_'+str(model_num))
    util.mkdir(out_dir_results)
    model_file = os.path.join(out_dir_train,'model_'+str(model_num)+'.pt')
    log_file = os.path.join(out_dir_results,'log.txt')
    log_arr=[]

    network = models.get(model_name)
    # data_transforms = network.data_transforms

    # test_data = dataset(test_file,data_transforms['val'])
    
    if batch_size_val is None:
        batch_size_val = len(test_data)
    

    test_dataloader = torch.utils.data.DataLoader(test_data, batch_size=batch_size_val,
                        shuffle=False, num_workers=1)

    torch.cuda.device(0)
    iter_begin = 0
    model = torch.load(model_file)
    model.cuda()
    model.eval()
    
    # criterion = nn.CrossEntropyLoss()
    
    predictions = []
    labels_all = []
    out_all = []

    for num_iter,batch in enumerate(test_dataloader):
                
        # batch = test_dataloader.next() 
        labels_all.append(batch['label'].numpy())

        data = Variable(batch['image'].cuda())
        labels = Variable(torch.LongTensor(batch['label']).cuda())
        

        output = model(data)
        out = output.data.cpu().numpy()
        out_all.append(out)
        
        predictions.append(np.argmax(out,1))
        
        loss = criterion(output, labels)    
        loss_iter = loss.data[0]

        str_display = 'iter: %d, val loss: %.4f' %(num_iter,loss_iter)
        log_arr.append(str_display)
        print str_display
        

        util.writeFile(log_file, log_arr)
    
    out_all = np.concatenate(out_all,0)
    predictions = np.concatenate(predictions)
    labels_all = np.concatenate(labels_all)
    
    accuracy = np.sum(predictions == labels_all) / float(labels_all.size)

    str_display = 'accuracy: %.4f' % (accuracy)
    print str_display
    log_arr.append(str_display)
    
    util.writeFile(log_file, log_arr)
Example #45
def update(doc, parent, user, version, diff):
    if not diff:
        return
    models.lock_acquire(doc)
    now = time.time()
    patch = models.get(doc)
    if not patch:
        patch = []
    pre = []
    version_count = models.get('version:%s:%s' % (user, doc))
    if not version_count:
        version_count = 0
    version_max = models.get('versionmax:%s:%s' % (user, doc))
    if not version_max:
        version_max = 0
    version_time = models.get('versiontime:%s:%s' % (user, doc))
    if not version_time:
        version_time = 0
    same = []
    if parent != version_time:
        models.set('version:%s:%s' % (user, doc), 1, now+60)
        models.set('versionmax:%s:%s' % (user, doc), version, now+60)
        models.set('versiontime:%s:%s' % (user, doc), parent, now+60)
        if version == 1:
            same = [(version, diff)]
        else:
            models.set('versions:%s:%s' % (user, doc), [(version, diff)], now+60)
    else:
        same = models.get('versions:%s:%s' % (user, doc))
        if not same:
            same = []
        version_count += 1
        models.set('version:%s:%s' % (user, doc), version_count, now+60)
        if version > version_max:
            version_max = version
        models.set('versionmax:%s:%s' % (user, doc), version_max, now+60)
        if version_count == version_max:
            same.append((version, diff))
            models.delete('versions:%s:%s' % (user, doc))
        else:
            models.set('versions:%s:%s' % (user, doc), same+[(version, diff)], now+60)
            same = []
    if not same:
        models.lock_release(doc)
        return
    same = sorted(same)
    version = same[0][0]
    for i in reversed(patch):
        if i['timestamp'] == parent or (i['user'] == user and i['version']+1 == version):
            break
        pre = i['diff']+pre
    diff = []
    for i in same:
        diff += utils.forward(pre, i[1])
    version = same[-1][0]
    ret = {'parent': parent, 'timestamp': now, 'user': user, 'version': version, 'diff': diff}
    models.set(doc, [x for x in patch if x['timestamp'] >= now - 60] + [ret])
    models.set('last:%s' % doc, now)
    text = models.get('doc:%s' % doc)
    if text:
        text = text[1]
    else:
        text = ''
    text = utils.text_patch(text, diff)
    models.set('doc:%s' % doc, (now, text))
    models.lock_release(doc)
Example #46
def main():
    print 'hello'

    train_file = '../data/ucf101/train_test_files/train.txt'
    limit = 500
    model_name = 'just_mill'
    network_params = dict(n_classes=20, deno = 8, init=False )

    criterion = MultiCrossEntropy().cuda()
    
    train_data = UCF_dataset(train_file, limit)
    train_dataloader = DataLoader(train_data, collate_fn = train_data.collate_fn,
                        batch_size=10,
                        shuffle=False)
    network = models.get(model_name,network_params)
    model = network.model
    model = model.cuda()
    # net = models.Network(n_classes= 20, deno = 8)
    # print net.model
    # net.model = net.model.cuda()
    # input = np.zeros((32,2048))
    # input = torch.Tensor(input).cuda()
    # input = Variable(input)
    # output, pmf = net.model(input)
    
    optimizer = torch.optim.Adam(network.get_lr_list([1e-6]),weight_decay=0)
    print len(train_dataloader)
    
    # exit = True

    for num_epoch in range(500):

        labels = []
        preds = []

        for num_iter, train_batch in enumerate(train_dataloader):
            # print num_iter
            sample = train_batch['features']
            # [0].cuda()
            label = train_batch['label'].cuda()

            print label.size()
            print len(sample)
            print sample[0].size()


            out,pmf = model.forward(sample)
            preds.append(pmf.unsqueeze(0))
            labels.append(label)


        preds = torch.cat(preds,0)
        labels = torch.cat(labels,0)
        loss = criterion(labels, preds)
        # raw_input()
            # print pmf.size()
            
        optimizer.zero_grad()

        # loss = model.multi_ce(labels, pmf)

        loss.backward()
        optimizer.step()
            

        loss_val = loss.data[0].cpu().numpy()
        
        labels = labels.data.cpu().numpy()
        preds = torch.nn.functional.softmax(preds).data.cpu().numpy()
        
        # ,np.argmax(preds,axis=1)
        accu =  np.sum(np.argmax(labels,axis=1)==np.argmax(preds,axis=1))/float(labels.shape[0])
        print num_epoch, loss_val, accu
Example #47
def run(sess, f, data):
    # create graph
    input_size, output_size = data.io_shape
    inputs = tf.placeholder(tf.float32, [None, input_size], name='inputs')
    outputs, _, feed_dicts = m.get(f.model).create_model(inputs, output_size)

    labels = tf.placeholder(tf.float32, [None, output_size], name='labels')
    loss, train_step = create_train_ops(outputs, labels, lr=f.lr, loss=f.loss)
    accuracy = create_eval_ops(outputs, labels, loss=f.loss)
    summary_op = create_summary_ops(loss, accuracy)

    # only initialize non-initialized vars:
    u.init_uninitted_vars(sess)
    # (this is not super important for training, but its very important
    # in optimize, and in distill)

    saver = tf.train.Saver(tf.global_variables())

    summary_dir = os.path.join(f.summary_folder, f.run_name, 'train')
    train_writer = tf.summary.FileWriter(os.path.join(summary_dir, 'train'),
                                         sess.graph)
    trainbatch_writer = tf.summary.FileWriter(
        os.path.join(summary_dir, 'train_batch'), sess.graph)
    test_writer = tf.summary.FileWriter(os.path.join(summary_dir, 'test'),
                                        sess.graph)

    with sess.as_default():
        global_step = 0

        for i in range(f.epochs):
            print('Epoch: {}'.format(i))
            for batch_x, batch_y in data.train_epoch_in_batches(
                    f.train_batch_size):
                summary, _ = sess.run([summary_op, train_step],
                                      feed_dict={**feed_dicts['train'],
                                                 inputs: batch_x,
                                                 labels: batch_y})
                trainbatch_writer.add_summary(summary, global_step)

                if global_step % f.eval_interval == 0:
                    # eval test set
                    summaries = []
                    for test_batch_x, test_batch_y in data.test_epoch_in_batches(
                            f.test_batch_size):
                        summary = sess.run(summary_op,
                                           feed_dict={**feed_dicts['eval'],
                                                      inputs: test_batch_x,
                                                      labels: test_batch_y})
                        summaries.append(summary)
                    test_writer.add_summary(
                        u.merge_summary_list(summaries, True), global_step)

                    # eval train set
                    summaries = []
                    for train_batch_x, train_batch_y in data.train_epoch_in_batches(
                            f.train_batch_size):
                        summary = sess.run(summary_op,
                                           feed_dict={**feed_dicts['eval'],
                                                      inputs: train_batch_x,
                                                      labels: train_batch_y})
                        summaries.append(summary)
                    train_writer.add_summary(
                        u.merge_summary_list(summaries, True), global_step)

                global_step += 1

                if global_step % f.checkpoint_interval == 0:
                    checkpoint_dir = os.path.join(summary_dir, 'checkpoint/')
                    u.ensure_dir_exists(checkpoint_dir)
                    checkpoint_file = os.path.join(checkpoint_dir, f.model)
                    saved_file = saver.save(sess,
                                            checkpoint_file,
                                            global_step=global_step)
                    print('saved model at {}'.format(saved_file))

    print('saved model at {}'.format(saved_file))
Example #48
def train_model(out_dir_train,
                train_data,
                test_data,
                batch_size = None,
                batch_size_val =None,
                num_epochs = 100,
                save_after = 20,
                disp_after = 1,
                plot_after = 10,
                test_after = 1,
                lr = 0.0001,
                dec_after = 100, 
                model_name = 'alexnet',
                criterion = nn.CrossEntropyLoss(),
                gpu_id = 0,
                num_workers = 0,
                model_file = None,
                epoch_start = 0):

    util.mkdir(out_dir_train)
    log_file = os.path.join(out_dir_train,'log.txt')
    plot_file = os.path.join(out_dir_train,'loss.jpg')
    log_arr = []
    plot_arr = [[],[]]
    plot_val_arr = [[],[]]

    network = models.get(model_name)
    if model_file is not None:
        network.model = torch.load(model_file)
    model = network.model

    # train_data = dataset(train_file,data_transforms['train'])
    # test_data = dataset(test_file,data_transforms['val'])
    
    if batch_size is None:
        batch_size = len(train_data)

    if batch_size_val is None:
        batch_size_val = len(test_data)

    train_dataloader = torch.utils.data.DataLoader(train_data,
                        batch_size=batch_size,
                        shuffle=True,
                        num_workers=num_workers)
    
    test_dataloader = torch.utils.data.DataLoader(test_data, 
                        batch_size=batch_size_val,
                        shuffle=False, 
                        num_workers=num_workers)
    
    torch.cuda.device(gpu_id)
    
    model = model.cuda()
    model.train(True)
    
    optimizer = optim.SGD(network.get_lr_list(lr), lr=0, momentum=0.9)

    if dec_after is not None:
        exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=dec_after, gamma=0.1)

    for num_epoch in range(epoch_start,num_epochs):

        for num_iter_train,batch in enumerate(train_dataloader):
            
            data = Variable(batch['image'].cuda())
            labels = Variable(torch.LongTensor(batch['label']).cuda())
            optimizer.zero_grad()
            loss = criterion(model(data), labels)    
            loss_iter = loss.data[0]
            loss.backward()
            optimizer.step()
            
            
            num_iter = num_epoch*len(train_dataloader)+num_iter_train
            plot_arr[0].append(num_iter); plot_arr[1].append(loss_iter)

            str_display = 'lr: %.6f, iter: %d, loss: %.4f' %(optimizer.param_groups[-1]['lr'],num_iter,loss_iter)
            log_arr.append(str_display)
            print str_display

            if num_iter % plot_after== 0 and num_iter>0:
                util.writeFile(log_file, log_arr)
                if len(plot_val_arr[0])==0:
                    visualize.plotSimple([(plot_arr[0],plot_arr[1])],out_file = plot_file,title = 'Loss',xlabel = 'Iteration',ylabel = 'Loss',legend_entries=['Train'])
                else:
                    visualize.plotSimple([(plot_arr[0],plot_arr[1]),(plot_val_arr[0],plot_val_arr[1])],out_file = plot_file,title = 'Loss',xlabel = 'Iteration',ylabel = 'Loss',legend_entries=['Train','Val'])


        if num_epoch % test_after == 0 :
            model.eval()
            predictions = []
            labels_all = []
    
            for num_iter_test,batch in enumerate(test_dataloader):
                labels_all.append(batch['label'].numpy())
        
                data = Variable(batch['image'].cuda())
                labels = Variable(torch.LongTensor(batch['label']).cuda())
                output = model(data)
                
                out = output.data.cpu().numpy()
                predictions.append(np.argmax(out,1))                

                loss = criterion(output, labels)    
                loss_iter = loss.data[0]

                num_iter = num_epoch*len(train_dataloader)+num_iter_test
                plot_val_arr[0].append(num_iter); plot_val_arr[1].append(loss_iter)

                str_display = 'lr: %.6f, val iter: %d, val loss: %.4f' %(optimizer.param_groups[-1]['lr'],num_iter,loss_iter)
                log_arr.append(str_display)
                print str_display
            labels_all = np.concatenate(labels_all)
            predictions = np.concatenate(predictions)
            accuracy = np.sum(predictions==labels_all)/float(labels_all.size)
            str_display = 'val accuracy: %.4f' %(accuracy)
            log_arr.append(str_display)
            print str_display
            

            model.train(True)

        if num_epoch % save_after == 0:
            out_file = os.path.join(out_dir_train,'model_'+str(num_epoch)+'.pt')
            print 'saving',out_file
            torch.save(model,out_file)

        if dec_after is not None:
            exp_lr_scheduler.step()
    
    out_file = os.path.join(out_dir_train,'model_'+str(num_epoch)+'.pt')
    print 'saving',out_file
    torch.save(model,out_file)
    
    # print plot_arr[0]

    util.writeFile(log_file, log_arr)
    if len(plot_val_arr[0])==0:
        visualize.plotSimple([(plot_arr[0],plot_arr[1])],out_file = plot_file,title = 'Loss',xlabel = 'Iteration',ylabel = 'Loss',legend_entries=['Train'])
    else:
        visualize.plotSimple([(plot_arr[0],plot_arr[1]),(plot_val_arr[0],plot_val_arr[1])],out_file = plot_file,title = 'Loss',xlabel = 'Iteration',ylabel = 'Loss',legend_entries=['Train','Val'])   
Example #49
ap.add_argument('--no-save',
                dest='save',
                action='store_false',
                help="save model and data to files")
ap.add_argument('--resume', dest='resume_model', type=str, default="")
ap.add_argument('--n_cpu',
                type=int,
                default=1,
                help='number of CPU threads to use during data generation')
args = ap.parse_args()
print(args)

assert K.image_data_format() == 'channels_last', \
    "image data format must be channels_last"

model = models.get(name=args.model, stride_scale=args.stride_scale)
print("#" * 14 + " MODEL " + "#" * 14)
print("### Stride scale:              " + str(model.stride_scale))
for s in model.strides:
    print("### Stride:                    " + str(s))
print("#" * 35)

dataset = datasets.get(name=args.dataset,
                       layer_strides=model.strides,
                       layer_offsets=model.offsets,
                       layer_fields=model.fields,
                       white_prob=args.white,
                       bb_positive=args.bb_positive,
                       iou_treshold=args.iou,
                       save=args.save,
                       batch_size=args.batch_size,
Example #50
def main():
    from models.utils import get_args, get_dataloader

    # args = get_args()
    # print args

    split_num = 0

    train_file = '../data/ck_96/train_test_files/train_' + str(
        split_num) + '.txt'
    test_file = '../data/ck_96/train_test_files/test_' + str(
        split_num) + '.txt'
    mean_file = '../data/ck_96/train_test_files/train_' + str(
        split_num) + '_mean.png'
    std_file = '../data/ck_96/train_test_files/train_' + str(
        split_num) + '_std.png'

    list_of_to_dos = ['flip', 'rotate']
    mean_im = scipy.misc.imread(mean_file).astype(np.float32)
    std_im = scipy.misc.imread(std_file).astype(np.float32)

    batch_size = 6
    clip = 5
    disable_cuda = False
    gpu = 2
    lr = 0.2
    num_epochs = 10
    disp_after = 1
    r = 1
    use_cuda = True

    batch_size_val = 64
    save_after = 1
    test_after = num_epochs - 1

    plot_after = 10

    lambda_ = 1e-2  #TODO:find a good schedule to increase lambda and m
    m = 0.2

    data_transforms = {}

    data_transforms['train'] = transforms.Compose([
        # lambda x: augment_image(x, list_of_to_dos, mean_im = mean_im, std_im = std_im,im_size = 48),
        # lambda x: np.concatenate([x,x,x],2),
        # transforms.ToPILImage(),

        # transforms.RandomCrop(32),
        # transforms.RandomHorizontalFlip(),
        # lambda x: x[:,:,:1],
        lambda x: augmenters.random_crop(x, 32),
        lambda x: augmenters.horizontal_flip(x),
        transforms.ToTensor(),
        lambda x: x * 255.
    ])

    data_transforms['val'] = transforms.Compose([
        # transforms.CenterCrop(32),
        lambda x: augmenters.crop_center(x, 32, 32),
        transforms.ToTensor(),
        lambda x: x * 255.
    ])

    # train_loader, test_loader = get_dataloader(batch_size)
    # for data in train_loader:
    #     imgs,labels = data
    #     print labels
    #     break
    # return

    our_data = True
    train_data = dataset.CK_48_Dataset(train_file, mean_file, std_file,
                                       data_transforms['train'])
    test_data = dataset.CK_48_Dataset(test_file, mean_file, std_file,
                                      data_transforms['val'])

    train_loader = torch.utils.data.DataLoader(train_data,
                                               batch_size=batch_size,
                                               shuffle=True,
                                               num_workers=0)

    test_loader = torch.utils.data.DataLoader(test_data,
                                              batch_size=batch_size_val,
                                              shuffle=False,
                                              num_workers=0)

    # -batch_size=64 -lr=2e-2 -num_epochs=5 -r=1 -print_freq=5
    steps = len(train_loader.dataset) // batch_size
    print 'steps', steps

    A, B, C, D, E, r = 32, 8, 16, 16, 8, r  # a small CapsNet
    # model = CapsNet(A,B,C,D,E,r)
    import models
    params = dict(A=A, B=B, C=C, D=D, E=E, r=r)
    net = models.get('pytorch_mat_capsules', params)
    # net = Network(A,B,C,D,E,r)
    model = net.model
    # .cuda()

    # A,B,C,CC,D,E,r = 32,8,16,16,16,8,r # additional conv-caps layer for bigger input

    # # A,B,C,CC,D,E,r = 64,8,16,16,16,8,r #  additional conv-caps layer for bigger input
    # model = CapsNet_ck(A,B,C,CC,D,E,r)

    # print model

    with torch.cuda.device(gpu):
        #        print(gpu, type(gpu))
        # if pretrained:
        #     model.load_state_dict(torch.load(pretrained))
        #     m = 0.8
        #     lambda_ = 0.9
        if use_cuda:
            print("activating cuda")
            model.cuda()

        optimizer = torch.optim.Adam(net.get_lr_list(0.02))
        # optimizer = torch.optim.Adam(model.parameters(), lr=lr)
        # scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, 'max',patience = 1)

        for data in train_loader:
            # b += 1
            # if lambda_ < 1:
            #     lambda_ += 2e-1/steps
            # if m < 0.9:
            #     m += 2e-1/steps
            # optimizer.zero_grad()

            if our_data:
                imgs = data['image']
                labels = data['label']

            else:
                imgs, labels = data  #b,1,28,28; #b

            imgs, labels = Variable(imgs), Variable(labels)
            if use_cuda:
                imgs = imgs.cuda()
                labels = labels.cuda()

            print imgs.size()
            print labels.size()

            break

        for epoch in range(num_epochs):
            m = 0.2
            # print 'm',m
            #Train
            # print("Epoch {}".format(epoch))
            b = 0
            correct = 0

            # raw_input()

            optimizer.zero_grad()
            out = model(imgs)
            # ,lambda_) #b,10,17
            out_poses, out_labels = out[:, :-8], out[:, -8:]  #b,16*10; b,10
            loss = model.spread_loss(out_labels, labels, m)
            # raw_input()
            # loss = model.loss2(out_labels,labels)
            torch.nn.utils.clip_grad_norm(model.parameters(), clip)
            loss.backward()
            optimizer.step()
            #stats
            pred = out_labels.max(1)[1]  #b
            acc = pred.eq(labels).cpu().sum().data[0]
            correct += acc
            # if b % disp_after == 0:
            print("batch:{}, loss:{:.4f}, acc:{:}/{}".format(
                epoch, loss.data[0], acc, batch_size))
            #     break

            # break

            acc = correct / float(len(train_loader.dataset))
            # print("Epoch{} Train acc:{:4}".format(epoch, acc))
            # scheduler.step(acc)
            if epoch % save_after == 0:
                torch.save(model.state_dict(), "./model_{}.pth".format(epoch))
            # if loss.cpu().data[0]==0.0:
            #     print out_labels,labels
            #     break

            #Test
            if epoch % test_after == 0:
                print('Testing...')
                correct = 0
                # for data in test_loader:
                #     if our_data:
                #         imgs = data['image']
                #         labels = data['label']
                #     else:
                #         imgs,labels = data #b,1,28,28; #b
                #     imgs,labels = Variable(imgs),Variable(labels)
                #     if use_cuda:
                #         imgs = imgs.cuda()
                #         labels = labels.cuda()
                out = model(imgs)
                # ,lambda_) #b,10,17
                out_poses, out_labels = out[:, :-8], out[:, -8:]  #b,16*10; b,10
                # loss = model.loss(out_labels, labels, m)
                print labels
                print out_labels

                loss = model.loss(out_labels, labels, m)
                #stats
                pred = out_labels.max(1)[1]  #b
                acc = pred.eq(labels).cpu().sum().data[0]
                correct += acc

                acc = correct / float(len(test_loader.dataset))
                print("Epoch{} Test acc:{:4}".format(epoch, acc))
Example #51
	def GET(self):
		blogItem = models.get('posts')
		return render.blog(blogItem)