Code Example #1
File: taxonomy.py  Project: vardhman1996/qdv
def gen_noffset(tax_input_folder, tax_output_folder):
    all_tax = load_all_tax(tax_input_folder)
    ensure_directory(tax_output_folder)
    for tax in all_tax:
        populate_noffset(tax.root)
        out_file = op.join(tax_output_folder, tax.root.name + '.yaml')
        with open(out_file, 'w') as fp:
            yaml.dump(tax.dump(), fp)
Code Example #2
File: msoftmax.py  Project: vardhman1996/qdv
def MNISTextract_feature(data_loader, model, feature_file, target_file):
    # extract the feature
    all_feature = []
    all_target = []
    model.eval()
    for feature, target in MNISTextract(data_loader, model):
        all_feature.append(feature)
        all_target.append(target)
    feature = torch.cat(all_feature)
    target = torch.cat(all_target)
    ensure_directory(op.dirname(feature_file))
    torch.save(feature.data.cpu(), feature_file)
    ensure_directory(op.dirname(target_file))
    torch.save(target.cpu(), target_file)
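The function above only writes the tensors to disk; a minimal sketch of reading them back later (the paths are placeholders, not from the project):

import torch

feature = torch.load('output/mnist/feature.pt')  # tensor saved above via torch.save
target = torch.load('output/mnist/target.pt')
print(feature.size(), target.size())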
Code Example #3
File: tsv_io.py  Project: vardhman1996/qdv
def tsv_writer(values, tsv_file_name):
    ensure_directory(os.path.dirname(tsv_file_name))
    tsv_lineidx_file = os.path.splitext(tsv_file_name)[0] + '.lineidx'
    idx = 0
    tsv_file_name_tmp = tsv_file_name + '.tmp'
    tsv_lineidx_file_tmp = tsv_lineidx_file + '.tmp'
    with open(tsv_file_name_tmp, 'w') as fp, open(tsv_lineidx_file_tmp, 'w') as fpidx:
        assert values is not None
        for value in values:
            assert value
            v = '{0}\n'.format('\t'.join(value))
            fp.write(v)
            fpidx.write(str(idx) + '\n')
            idx = idx + len(v)
    os.rename(tsv_file_name_tmp, tsv_file_name)
    os.rename(tsv_lineidx_file_tmp, tsv_lineidx_file)
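The matching reader is not part of this listing; the sketch below shows how the .lineidx offsets written above could be used for random access to a single row (seek_tsv_row is a made-up name for illustration, not a project function):

import os

def seek_tsv_row(tsv_file_name, row_index):
    # the i-th line of the .lineidx file stores the byte offset of row i
    lineidx_file = os.path.splitext(tsv_file_name)[0] + '.lineidx'
    with open(lineidx_file, 'r') as fpidx:
        offsets = [int(line) for line in fpidx]
    # jump straight to the row and split the tab-separated columns
    with open(tsv_file_name, 'r') as fp:
        fp.seek(offsets[row_index])
        return fp.readline().rstrip('\n').split('\t')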
Code Example #4
def init_logging2(log_path):
    ensure_directory(op.dirname(log_path))
    format_str = '%(asctime)s.%(msecs)03d %(filename)s:%(lineno)s %(funcName)10s(): %(message)s'
    logFormatter = logging.Formatter(format_str)
    rootLogger = logging.getLogger()
    rootLogger.handlers = []

    fileHandler = logging.FileHandler(log_path)
    fileHandler.setFormatter(logFormatter)
    rootLogger.addHandler(fileHandler)

    consoleHandler = logging.StreamHandler()
    consoleHandler.setFormatter(logFormatter)
    rootLogger.addHandler(consoleHandler)

    rootLogger.setLevel(logging.INFO)
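A minimal usage sketch (the log path is only an example):

init_logging2('output/train/run.log')
logging.info('this message goes to both the console and output/train/run.log')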
Code Example #5
def cmd_run(list_cmd, return_output=False, env=None,
        working_dir=None,
        shell=False):
    logging.info('start to cmd run: {}'.format(' '.join(map(str, list_cmd))))
    # if we don't set stdin as sp.PIPE, it will complain that stdin is not a tty
    # device, probably because it runs inside another process.
    # if stdout=sp.PIPE, the output will not be printed on the screen
    e = os.environ.copy()
    if working_dir:
        ensure_directory(working_dir)
    if env:
        for k in env:
            e[k] = env[k]
    if not return_output:
        #if env is None:
            #p = sp.Popen(list_cmd, stdin=sp.PIPE, cwd=working_dir)
        #else:
        if shell:
            p = sp.Popen(' '.join(list_cmd), 
                    stdin=sp.PIPE, 
                    env=e, 
                    cwd=working_dir,
                    shell=True)
        else:
            p = sp.Popen(list_cmd, 
                    stdin=sp.PIPE, 
                    env=e, 
                    cwd=working_dir)
        message = p.communicate()
        if p.returncode != 0:
            raise ValueError(message)
    else:
        if shell:
            message = sp.check_output(' '.join(list_cmd), 
                    env=e,
                    cwd=working_dir,
                    shell=True)
        else:
            message = sp.check_output(list_cmd,
                    env=e,
                    cwd=working_dir)
    
    logging.info('finished the cmd run')
    return message
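A minimal usage sketch of cmd_run (the commands below are only examples):

# let the command print directly to the screen
cmd_run(['ls', '-l'])
# capture the output instead of printing it
listing = cmd_run(['ls', '-l'], return_output=True)
# run through the shell, e.g. so that wildcards are expanded
cmd_run(['ls', '*.tsv'], shell=True)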
Code Example #6
File: create_mnist.py  Project: vardhman1996/qdv
def _ensure_data_file(file_name):
    if not os.path.exists(file_name):
        ensure_directory(os.path.dirname(file_name))
        base_name = os.path.basename(file_name)
        url = 'http://yann.lecun.com/exdb/mnist/' + base_name
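        # urllib.urlretrieve is the Python 2 API; in Python 3 it is urllib.request.urlretrieve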
        urllib.urlretrieve(url, file_name)
Code Example #7
def min_l2_init(old_train_proto, old_train_net_param, new_train_net_proto,
                new_training_tsv, num_new_label, eps, init_model_path):
    caffe.set_device(0)
    caffe.set_mode_gpu()
    old_net = caffe.Net(str(old_train_proto), old_train_net_param, caffe.TEST)

    extract_new_proto = load_net(old_train_proto)
    if extract_new_proto.layer[0].type == 'TsvData':
        extract_new_proto.layer[0].tsv_data_param.source = new_training_tsv
    else:
        assert False

    bottom_name, top_name, layer_name = extract_last_linear_bottom_top(
        extract_new_proto)
    logging.info(bottom_name)
    logging.info(top_name)

    extract_new_proto_file = init_model_path + '.prototxt'
    write_to_file(str(extract_new_proto), extract_new_proto_file)

    new_net = caffe.Net(str(extract_new_proto_file), old_train_net_param,
                        caffe.TEST)

    def extract_data(net, num=1000):
        all_feature, all_conf, all_label = [], [], []
        while num > 0:
            net.forward(end=top_name)
            feat = np.squeeze(net.blobs[bottom_name].data)
            conf = np.squeeze(net.blobs[top_name].data)
            label = np.squeeze(net.blobs['label'].data)
            all_feature.append(feat)
            all_conf.append(conf)
            all_label.append(label)
            num = num - len(feat)
        return all_feature, all_conf, all_label

    all_old_feature, all_old_conf, all_old_label = extract_data(old_net)

    alpha = 0

    XXT = 0
    XY = 0
    total_feature = len(all_old_feature) * len(all_old_feature[0])
    for old_feature, old_conf, old_label in zip(all_old_feature, all_old_conf,
                                                all_old_label):
        C = old_conf.shape[1]
        # predict_at_label_idx - np.mean(all_predict_except_label_idx)
        a = np.choose(old_label.astype(np.int32), old_conf.T) * C / (C - 1) - \
                np.sum(old_conf, axis=1) / (C - 1)
        alpha += np.mean(a)

        XT = np.append(old_feature, np.ones((len(old_feature), 1)), axis=1)
        y = (np.sum(old_conf, axis=1) -
             np.choose(old_label.astype(np.int32), old_conf.T)) / (C - 1)
        YT = np.tile(y, [num_new_label, 1])
        XXT += np.dot(XT.T, XT)
        XY += np.dot(XT.T, YT.T)

    alpha /= len(all_old_feature)
    assert alpha > 0, 'base model is not good enough'
    th = 1
    if alpha > th:
        alpha = th

    all_new_feature, all_new_conf, all_new_label = extract_data(new_net)
    total_feature += len(all_new_feature) * len(all_new_feature[0])

    for new_feature, new_conf, new_label in zip(all_new_feature, all_new_conf,
                                                all_new_label):
        negative_conf = np.mean(new_conf, axis=1)
        positive_conf = negative_conf + alpha
        Y = np.tile(negative_conf.reshape((len(negative_conf), 1)),
                    [1, num_new_label])
        flatten_index = np.ravel_multi_index(
            [range(len(new_label)),
             new_label.astype(np.int32)], [Y.shape[0], Y.shape[1]])
        np.put(Y, flatten_index, positive_conf)
        XT = np.append(new_feature, np.ones((len(new_feature), 1)), axis=1)
        XXT += np.dot(XT.T, XT)
        XY += np.dot(XT.T, Y)
    eps = eps * total_feature
    Wb = la.solve(XXT + eps * np.identity(XXT.shape[0]), XY)

    target_net = caffe.Net(str(new_train_net_proto), caffe.TEST)
    target_net.copy_from(old_train_net_param, ignore_shape_mismatch=True)
    old_w = old_net.params[layer_name][0].data
    new_w = Wb.T[:, :-1]
    target_net.params[layer_name][0].data[...] = np.append(old_w,
                                                           new_w,
                                                           axis=0)
    old_b = old_net.params[layer_name][1].data
    new_b = Wb.T[:, -1]
    target_net.params[layer_name][1].data[...] = np.append(old_b, new_b)
    ensure_directory(op.dirname(init_model_path))
    target_net.save(init_model_path)
    logging.info('old_w: {}; new_w: {}'.format(np.mean(np.abs(old_w[:])),
                                               np.mean(np.abs(new_w[:]))))
    logging.info('old_b: {}; new_b: {}'.format(np.mean(np.abs(old_b[:])),
                                               np.mean(np.abs(new_b[:]))))
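The core of min_l2_init is a regularized least-squares (ridge regression) fit: XXT and XY are accumulated over the extracted features, and the rows of the new classifier are then obtained in closed form. Below is a self-contained sketch of that final step on random data, mirroring the variable names above (the sizes are arbitrary and only for illustration):

import numpy as np
import numpy.linalg as la

n, d, num_new_label = 100, 5, 3
feature = np.random.randn(n, d)          # stands in for the extracted features
Y = np.random.randn(n, num_new_label)    # stands in for the target confidences

# append a constant column so the bias is solved together with the weights
XT = np.append(feature, np.ones((n, 1)), axis=1)
XXT = np.dot(XT.T, XT)
XY = np.dot(XT.T, Y)

eps = 1e-3 * n
# regularized normal equations: (X X^T + eps I) Wb = X Y
Wb = la.solve(XXT + eps * np.identity(XXT.shape[0]), XY)
new_w, new_b = Wb.T[:, :-1], Wb.T[:, -1]  # same split as in the code above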
Code Example #8
File: process_image.py  Project: vardhman1996/qdv
def save_image(im, file_name):
    ensure_directory(os.path.dirname(file_name))
    cv2.imwrite(file_name, im)
Code Example #9
def train(**kwargs):
    train_data = 'voc20'
    train_split = 'train'
    from process_tsv import TSVDataset
    tsv_dataset = TSVDataset(train_data)
    tsv_file = tsv_dataset.get_data(train_split)
    labelmap = tsv_dataset.get_labelmap_file()
    kwargs['output_folder'] = op.join('output', '_'.join([train_data, 'vgg',
        kwargs['expid']]))
    from qd_common import ensure_directory

    ensure_directory(kwargs['output_folder'])
    
    cfg = kwargs
    if cfg['dataset'] == 'COCO':
        if cfg['dataset_root'] == VOC_ROOT:
            if not os.path.exists(COCO_ROOT):
                parser.error('Must specify dataset_root if specifying dataset')
            print("WARNING: Using default COCO dataset_root because " +
                  "--dataset_root was not specified.")
            cfg['dataset_root'] = COCO_ROOT
        for k in coco:
            assert k not in cfg
            cfg[k] = coco[k]
        dataset = COCODetection(root=cfg['dataset_root'],
                                transform=SSDAugmentation(cfg['min_dim'],
                                                          MEANS))
    #elif cfg['dataset'] == 'VOC':
        #if cfg['dataset_root'] == COCO_ROOT:
            #parser.error('Must specify dataset if specifying dataset_root')
        #for k in voc:
            #assert k not in cfg
            #cfg[k] = voc[k]
        #dataset = VOCDetection(root=cfg['dataset_root'],
                               #transform=SSDAugmentation(cfg['min_dim'],
                                                         #MEANS))
    elif cfg['dataset']:
        for k in voc:
            if k in cfg:
                logging.info('skip {} to {}'.format(cfg[k], voc[k]))
                continue
            cfg[k] = voc[k]
        dataset = TSVDetection(tsv_file=tsv_file,
                labelmap=labelmap,
                transform=SSDAugmentation(cfg['min_dim'], MEANS))


    ssd_net = build_ssd('train', cfg['min_dim'], cfg['num_classes'])
    net = ssd_net

    if cfg['cuda']:
        net = torch.nn.DataParallel(ssd_net)
        cudnn.benchmark = True

    if cfg['resume']:
        print('Resuming training, loading {}...'.format(cfg['resume']))
        ssd_net.load_weights(cfg['resume'])
        assert False, 'optimizer parameter is not loaded. Need a fix'
    else:
        vgg_weights = torch.load(cfg['save_folder'] + cfg['basenet'])
        print('Loading base network...')
        ssd_net.vgg.load_state_dict(vgg_weights)

    if cfg['cuda']:
        net = net.cuda()

    if not cfg['resume']:
        print('Initializing weights...')
        # initialize newly added layers' weights with xavier method
        ssd_net.extras.apply(weights_init)
        ssd_net.loc.apply(weights_init)
        ssd_net.conf.apply(weights_init)

    optimizer = optim.SGD(net.parameters(), lr=cfg['lr'],
                          momentum=cfg['momentum'],
                          weight_decay=cfg['weight_decay'])
    criterion = MultiBoxLoss(cfg['num_classes'], 0.5, True, 0, True, 3, 0.5,
                             False, cfg['cuda'])

    net.train()
    # loss counters
    loc_loss = 0
    conf_loss = 0
    epoch = 0
    logging.info('Loading the dataset...')

    epoch_size = len(dataset) // cfg['batch_size']

    step_index = 0

    data_loader = data.DataLoader(dataset, cfg['batch_size'],
                                  num_workers=cfg['num_workers'],
                                  shuffle=True, collate_fn=detection_collate,
                                  pin_memory=True)
    # create batch iterator
    batch_iterator = iter(data_loader)
    for iteration in range(cfg['start_iter'], cfg['max_iter']):
        if iteration in cfg['lr_steps']:
            step_index += 1
            adjust_learning_rate(optimizer, cfg['lr'], cfg['gamma'], step_index)

        t0 = time.time()
        # load train data
        try:
            images, targets = next(batch_iterator)
        except StopIteration:
            batch_iterator = iter(data_loader)
            images, targets = next(batch_iterator)

        if cfg['cuda']:
            images = Variable(images.cuda())
            targets = [Variable(ann.cuda(), volatile=True) for ann in targets]
        else:
            images = Variable(images)
            targets = [Variable(ann, volatile=True) for ann in targets]
        data_loading_time = time.time() - t0
        t0 = time.time()
        # forward
        out = net(images)
        # backprop
        optimizer.zero_grad()
        loss_l, loss_c = criterion(out, targets)
        loss = loss_l + loss_c
        loss.backward()
        optimizer.step()
        loc_loss += loss_l.data[0]
        conf_loss += loss_c.data[0]

        if iteration % 10 == 0:
            logging.info('data loading time {}'.format(data_loading_time))
            logging.info('timer: %.4f sec.' % (time.time() - t0))
            logging.info('iter ' + repr(iteration) + ' || Loss: %.4f ||' % (loss.data[0]))

        if iteration != 0 and iteration % 500 == 0:
            logging.info('Saving state, iter: {}'.format(iteration))
            model_file = op.join(cfg['output_folder'], 'snapshot',
                    'model_iter_{}.pth.tar'.format(iteration))
            ensure_directory(op.dirname(model_file))
            torch.save(ssd_net.state_dict(), model_file)
    model_file = op.join(cfg['output_folder'], 'snapshot', 'model_iter_{}.pth.tar'.format(iteration))
    ensure_directory(op.dirname(model_file))
    torch.save(ssd_net.state_dict(), model_file)
Code Example #10
def save_uploaded_file(f, fname):
    from qd_common import ensure_directory
    ensure_directory(op.dirname(fname))
    # open in binary mode so the uploaded chunks (bytes) are written unmodified
    with open(fname, 'wb') as destination:
        for chunk in f.chunks():
            destination.write(chunk)