Example #1
def main():
    """Forward sequences."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch_size', '-bs', type=int, help='batch size', default=10)
    parser.add_argument('--save_path', '-sp', type=str, help='path to save the model',
                        default='models')
    parser.add_argument('--load_path', '-mp', type=str, help='path to load the model',
                        default=None)
    parser.add_argument('--lr', '-lr', type=float, help='initial learning rate',
                        default=0.2)
    parser.add_argument('--model_type', '-t', type=str, help='type of the model: inception, '
                        'vgg or squeezenet', default='inception')
    parser.add_argument('--cuda', dest='cuda', help='use cuda', action='store_true')
    parser.add_argument('--no-cuda', dest='cuda', help="don't use cuda", action='store_false')
    parser.add_argument('--freeze', '-fr', dest='freeze', help='freeze cnn layers',
                        action='store_true')
    parser.add_argument('--batch_first', dest='batch_first', action='store_true')
    parser.add_argument('--no-batch_first', dest='batch_first', action='store_false')
    parser.add_argument('--multigpu', nargs='*', default=[], help='list of gpus to use')
    parser.set_defaults(cuda=True)
    parser.set_defaults(freeze=False)
    parser.set_defaults(batch_first=True)
    args = parser.parse_args()

    filenames = {'train': 'train_no_dup.json',
                 'test': 'test_no_dup.json',
                 'val': 'valid_no_dup.json'}

    tic = time.time()
    print("Reading all texts and creating the vocabulary")
    # Create the vocabulary with all the texts.
    with open(os.path.join('data/label', filenames['train'])) as fjson:
        vocab = create_vocab([TXT_TEST_VAL_TF(t['name'])
                              for d in json.load(fjson)
                              for t in d['items']])
    print("Vocabulary creation took %.2f secs - %d words" % (time.time() - tic, len(vocab)))

    data_params = {'img_dir': 'data/images',
                   'json_dir': 'data/label',
                   'json_files': filenames,
                   'batch_size': args.batch_size,
                   'batch_first': args.batch_first}
    opt_params = {'learning_rate': args.lr,
                  'weight_decay': 1e-4}

    model, dataloaders, optimizer, criterion, contrastive_criterion = config(
        net_params=[args.model_type, 512, 512, 0.2, len(vocab), args.load_path, args.freeze],
        data_params=data_params,
        opt_params=opt_params,
        cuda_params={'cuda': args.cuda,
                     'multigpu': args.multigpu})

    print("before training: lr = %.4f" % optimizer.param_groups[0]['lr'])

    scheduler = StepLR(optimizer, 2, 0.5)  # halve the learning rate every 2 epochs

    train([model, criterion, contrastive_criterion, optimizer, scheduler, vocab, args.freeze],
          dataloaders, args.cuda, args.batch_first,
          [100, 500, args.save_path])
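
A note on the argument parsing above: the paired '--cuda'/'--no-cuda' flags plus set_defaults() emulate an on/off switch. On Python 3.9+, argparse can generate the pair automatically; a minimal sketch:

import argparse

parser = argparse.ArgumentParser()
# BooleanOptionalAction creates both --cuda and --no-cuda (Python 3.9+).
parser.add_argument('--cuda', action=argparse.BooleanOptionalAction, default=True,
                    help='use cuda')
args = parser.parse_args(['--no-cuda'])
assert args.cuda is False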
Example #2
    def __getitem__(self, index):
        """Load, augment, crop, and normalize one image; return it with its label."""
        im = cv2.imread(os.path.join(self.bpath, self.im_filenames[index]), 1)
        if im is None:
            im = cv2.imread(
                os.path.join(self.bpath, self.im_filenames[index]) + '.jpg', 1)
            if im is None:
                fs = os.path.join(self.bpath, self.im_filenames[index])
                # Record the unreadable image path for later inspection.
                with open('failed_images', 'a') as flog:
                    flog.write(fs + '\n')
                im = np.zeros([300, 300, 3], dtype=np.uint8)
        if not self.validation:
            im = self.seq.augment_image(im)
        rows, cols = im.shape[:2]
        if rows < self.input_size or cols < self.input_size:
            im = cv2.resize(im, (self.input_size, self.input_size))
        else:
            # random crop
            off_x = np.random.randint(
                0, max(1, (cols - self.input_size) // 2))
            off_y = np.random.randint(
                0, max(1, (rows - self.input_size) // 2))
            random_w = np.random.randint(
                self.input_size, max(self.input_size + 1, cols - off_x))
            random_h = np.random.randint(
                self.input_size, max(self.input_size + 1, rows - off_y))
            im = im[off_y:off_y + random_h, off_x:off_x + random_w, :]
            im = cv2.resize(im, (self.input_size, self.input_size))
        # BGR 2 RGB
        im = im[:, :, ::-1].copy()

        # Normalize with the iMaterialist RGB statistics:
        # mean = [0.5883, 0.5338, 0.5273], std = [0.3363, 0.3329, 0.3268]
        # (an earlier alternative was mean = [0.3940, 0.2713, 0.1869],
        #  std = [0.2777, 0.1981, 0.1574], RGB)
        im = ToTensor()(im)
        for t, m, s in zip(im, [0.5883, 0.5338, 0.5273],
                           [0.3363, 0.3329, 0.3268]):
            t.sub_(m).div_(s)
        label = np.zeros([228], dtype=np.float32)  # multi-hot vector over the 228 classes
        label[self.im_labels[index]] = 1
        return im, label
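
The ToTensor + per-channel sub_/div_ loop above is what torchvision's Normalize transform does; a minimal equivalent sketch, reusing the iMaterialist statistics from the comments:

from torchvision.transforms import Compose, Normalize, ToTensor

# HWC uint8 [0, 255] -> CHW float [0, 1], then per-channel (x - mean) / std.
normalize = Compose([
    ToTensor(),
    Normalize(mean=[0.5883, 0.5338, 0.5273], std=[0.3363, 0.3329, 0.3268]),
])
# im = normalize(im)  # would replace the manual loop in __getitem__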
Example #3
def visualize_batch_fn(images, labels, label_lengths):
    """Write de-normalized batch images to disk and print their labels."""
    N = images.shape[0]
    image_mean = np.array([0.485, 0.456, 0.406]).reshape(1, 1, 3)
    image_std = np.array([0.229, 0.224, 0.225]).reshape(1, 1, 3)
    for i in range(N):
        image = images[i].data.cpu().numpy()
        image = image.transpose(1, 2, 0)  # CHW -> HWC
        # Undo the ImageNet normalization before writing the image to disk.
        image *= image_std
        image += image_mean
        image = (255.0 * image).astype(np.uint8)
        # Strip the first and last tokens from the label sequence.
        indexes = labels[i].data.cpu().numpy().tolist()[1:label_lengths[i].item() - 1]
        labels_batch = [categories[x] for x in indexes]
        cv2.imwrite("batches/%d.jpg" % i, image[:, :, ::-1])
        print('%d %s' % (i, ','.join(labels_batch)))
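
The transpose/scale/shift sequence above inverts a standard ImageNet normalization; factored out as a small helper (hypothetical name, same statistics):

import numpy as np

def denormalize(chw, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)):
    """Map a normalized CHW float array back to an HWC uint8 image."""
    hwc = chw.transpose(1, 2, 0) * np.asarray(std) + np.asarray(mean)
    return (255.0 * hwc.clip(0.0, 1.0)).astype(np.uint8)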
Example #4
def train(train_params, dataloaders, cuda, batch_first, epoch_params):
    """Train the model.

    """
    model, criterion, contrastive_criterion, optimizer, scheduler, vocab, freeze = train_params
    numepochs, nsave, save_path = epoch_params

    log_name = ('runs/L2/lr%.3f' % optimizer.param_groups[0]['initial_lr'])
    if freeze:
        log_name += '_frozen'
    writer = SummaryWriter(log_name)

    n_iter = 0
    tic_e = time.time()
    for epoch in range(numepochs):
        print("Epoch %d - lr = %.4f" % (epoch, optimizer.param_groups[0]['lr']))
        for batch in dataloaders['train']:

            tic = time.time()
            # Clear gradients, reset hidden state.
            model.zero_grad()
            hidden = model.init_hidden(len(batch))

            # Get a list of images and texts from sequences:
            images, texts, seq_lens, im_lookup_table, txt_lookup_table = seqs2batch(batch, vocab)

            # Tensors are used directly; autograd.Variable has been a no-op since PyTorch 0.4.
            if cuda:
                hidden = (hidden[0].cuda(), hidden[1].cuda())
                images = images.cuda()
                texts = texts.cuda()

            packed_batch, (im_feats, txt_feats), (out, hidden) = model(
                images, seq_lens, im_lookup_table, txt_lookup_table, hidden, texts)

            out, _ = pad_packed_sequence(out, batch_first=batch_first)

            fw_loss, bw_loss = criterion(packed_batch, out)

            cont_loss = contrastive_criterion(im_feats, txt_feats)

            lstm_loss = fw_loss + bw_loss
            loss = lstm_loss + cont_loss

            if np.isnan(loss.item()) or lstm_loss.item() < 0:
                # Drop into the debugger if the loss diverges.
                import epdb
                epdb.set_trace()

            im_feats.register_hook(save_grad('im_feats'))
            loss.backward()
            # Gradient clipping
            torch.nn.utils.clip_grad_norm_(model.parameters(), 5.0)
            optimizer.step()
            print("iteration %d took %.2f secs" % (n_iter, time.time() - tic))

            print("\033[4;32miter %d\033[0m" % n_iter)
            # Removing the [0] to avoid error from all
            print("\033[1;34mTotal loss: %.3f ||| LSTM loss: %.3f ||| Contr. loss: %.3f\033[0m" %
                  (loss.data, lstm_loss.data, cont_loss.data))
            print("Seq lens:", [len(b['texts']) for b in batch])

            # Mean cosine distance between image and text embeddings.
            dists = torch.sum(1 - F.cosine_similarity(im_feats, txt_feats)) / im_feats.size(0)
            print("\033[0;31mdists: %.3f\033[0m" % dists.item())

            write_data = {'data/loss': loss.item(),
                          'data/lstm_loss': lstm_loss.item(),
                          'data/cont_loss': cont_loss.item(),
                          'data/pos_dists': dists.item()}
            write_tensorboard(writer, write_data, n_iter)

            n_iter += 1

            if not n_iter % nsave:
                if not os.path.exists(save_path):
                    os.makedirs(save_path)
                print("Epoch %d (%d iters) -- Saving model in %s" % (epoch, n_iter, save_path))
                torch.save(model.state_dict(), "%s_%d.pth" % (
                    os.path.join(save_path, 'model'), n_iter))

        print("\033[1;30mEpoch %i/%i: %f seconds\033[0m" % (epoch, numepochs, time.time() - tic_e))
    writer.close()
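
For reference, a self-contained sketch of the pack/pad round trip the training loop relies on, with toy dimensions rather than the project's actual model:

import torch
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence

lstm = torch.nn.LSTM(input_size=8, hidden_size=16, batch_first=True)
x = torch.randn(3, 5, 8)   # 3 sequences, padded to length 5
lengths = [5, 3, 2]        # true lengths, sorted in decreasing order
packed = pack_padded_sequence(x, lengths, batch_first=True)
out, hidden = lstm(packed)                                  # LSTM consumes the packed batch
out, out_lens = pad_packed_sequence(out, batch_first=True)  # back to a (3, 5, 16) tensor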
Example #5
                summary_writer.add_scalar('loss/train',
                                          loss.data.cpu().numpy(),
                                          i + epoch * len(train_dataloader))
        torch.save({'epoch': epoch,
                    'state_dict': model.state_dict()},
                   '%s_epoch_%d.pth' % (args.out, epoch))
    model.eval()
    val_loss = 0
    targets = np.zeros([0, 228])
    predictions = np.zeros([0, 228])
    with torch.no_grad():
        for i, (X, y) in enumerate(tqdm.tqdm(val_dataloader)):
            # Inside torch.no_grad(), tensors can be moved to the GPU directly.
            X = X.cuda()
            y = y.cuda()
            y_pred = model(X)
            prob = torch.sigmoid(y_pred)
            predictions = np.vstack((predictions, prob.cpu().numpy()))
            targets = np.vstack((targets, y.cpu().numpy()))
    mean_f1 = []
    for cls in range(228):
        f1, thr = find_best_f1_score(predictions[:, cls],
                                     targets[:, cls])
        mean_f1.append(f1)
    print('mean F1: %.4f' % np.mean(mean_f1))
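
find_best_f1_score is project-specific and not shown; a plausible sketch of its assumed behavior (sweep a threshold grid, return the best F1 and that threshold) using scikit-learn:

import numpy as np
from sklearn.metrics import f1_score

def find_best_f1_score(probs, targets, thresholds=np.linspace(0.05, 0.95, 19)):
    """Hypothetical reimplementation: best F1 over candidate thresholds."""
    scores = [f1_score(targets, probs > thr) for thr in thresholds]
    best = int(np.argmax(scores))
    return scores[best], thresholds[best]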
Example #6
    def import_item(self, item, wordpress_namespace, out_folder=None):
        """Takes an item from the feed and creates a post file."""
        if out_folder is None:
            out_folder = 'posts'

        title = get_text_tag(item, 'title', 'NO TITLE')
        # link is something like http://foo.com/2012/09/01/hello-world/
        # So, take the path, utils.slugify it, and that's our slug
        link = get_text_tag(item, 'link', None)
        path = unquote(urlparse(link).path)

        # In Python 2, path is a byte string, but slugify requires
        # unicode. Unquoted URL paths are usually UTF-8 encoded.
        if isinstance(path, utils.bytes_str):
            path = path.decode('utf8')
        # Remove the date from the slug
        slug = utils.slugify(path)[8:]
        if not slug:  # it happens if the post has no "nice" URL
            slug = get_text_tag(
                item, '{{{0}}}post_name'.format(wordpress_namespace), None)
        if not slug:  # it *may* happen
            slug = get_text_tag(
                item, '{{{0}}}post_id'.format(wordpress_namespace), None)
        if not slug:  # should never happen
            print("Error converting post:", title)
            return

        description = get_text_tag(item, 'description', '')
        post_date = get_text_tag(
            item, '{{{0}}}post_date'.format(wordpress_namespace), None)
        dt = utils.to_datetime(post_date)
        if dt.tzinfo and self.timezone is None:
            self.timezone = utils.get_tzname(dt)
        status = get_text_tag(
            item, '{{{0}}}status'.format(wordpress_namespace), 'publish')
        content = get_text_tag(
            item, '{http://purl.org/rss/1.0/modules/content/}encoded', '')

        tags = []
        if status == 'trash':
            print('Trashed post "{0}" will not be imported.'.format(title))
            return
        elif status != 'publish':
            tags.append('draft')
            is_draft = True
        else:
            is_draft = False

        for tag in item.findall('category'):
            text = tag.text
            if text == 'Uncategorized':
                continue
            tags.append(text)

        if is_draft and self.exclude_drafts:
            print('Draft "{0}" will not be imported.'.format(title))
        elif content.strip():
            # If no content is found, no files are written.
            self.url_map[link] = self.context['SITE_URL'] + '/' + \
                out_folder + '/' + slug + '.html'

            content = self.transform_content(content)

            self.write_metadata(os.path.join(self.output_folder, out_folder,
                                             slug + '.meta'),
                                title, slug, post_date, description, tags)
            self.write_content(
                os.path.join(self.output_folder, out_folder, slug + '.wp'),
                content)
        else:
            print('Not going to import "{0}" because it seems to contain'
                  ' no content.'.format(title))
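
To illustrate the slug derivation above: the permalink path is percent-decoded and slugified, and the code assumes the slugified path begins with eight date digits, which the [8:] slice strips. A minimal sketch of the path handling with the standard library (utils.slugify is Nikola's own helper):

from urllib.parse import unquote, urlparse

link = 'http://foo.com/2012/09/01/hello-world/'
path = unquote(urlparse(link).path)
print(path)  # -> '/2012/09/01/hello-world/'
# utils.slugify(path) is assumed to start with the 8 date digits
# (e.g. '20120901hello-world'), so [8:] leaves 'hello-world'.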