Code Example #1
File: test.py Project: UWNetworksLab/metasync
def test_clone(metasync, opts, need_init=True):
    "test cloning, after init"

    if need_init:
        test_init(metasync, opts)

    dst = os.path.join(opts.tmpdir, "repo_clone")
    util.mkdirs(dst)

    # pick first backend
    srv = metasync.config.get("backend", "services").split(",")[0]

    clone = MetaSync(dst)
    clone.cmd_clone("testing", srv, opts.encrypt_key)

    # compare file side-by-side
    for root, dirs, files in os.walk(clone.path_root):
        for name in files:
            dst = os.path.join(root, name)
            src = metasync.path_root + dst[len(clone.path_root):]
            try:
                if not filecmp.cmp(dst, src):
                    assert dst.endswith("config")
            except OSError as e:
                assert name.startswith("head") or name.startswith("prev")

    return clone
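
All of the snippets on this page go through a project-local util.mkdirs helper rather than calling os.makedirs directly. The helper itself is not shown here; the sketch below is only an assumption of the common contract (create the directory tree if missing, tolerate it already existing, and return the path, since several later examples such as #15 and #18 assign the return value), not any project's actual implementation.

import os

def mkdirs(path):
    # Hypothetical sketch: create path and any missing parents,
    # ignore the error if the directory already exists, and return
    # the path so calls like log_dir = mkdirs(...) keep working.
    try:
        os.makedirs(path)
    except OSError:
        if not os.path.isdir(path):
            raise
    return path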
Code Example #2
File: test.py Project: UWNetworksLab/metasync
def test_fetch(metasync, opts):
    "test fetching"

    clone = test_clone(metasync, opts)

    file_sizes = [1024, 2048]
    for size in file_sizes:
        pn = os.path.join(clone.path_root, "file-%s-2" % size)
        util.create_random_file(pn, size)
        clone.cmd_checkin(pn)
    pn = os.path.join(clone.path_root, "dir1")
    util.mkdirs(pn)
    clone.cmd_checkin(pn)
    pn = os.path.join(clone.path_root, "dir2")
    util.mkdirs(pn)
    pn = os.path.join(clone.path_root, "dir2", "file-1024")
    util.create_random_file(pn, 1024)
    pn = os.path.join(clone.path_root, "dir2")
    clone.cmd_checkin(pn)
    clone.cmd_push()
    root2 = clone.get_root_blob()
    metasync.cmd_fetch()
    metasync.cmd_update()
    root = metasync.get_root_blob()
    cnt = 0
    for i in root.walk():
        cnt += 1
    assert cnt == 7
Code Example #3
File: test.py Project: UWNetworksLab/metasync
def test_concurrent_upload(metasync, opts):

    def _put(srv, path, remote_path):
        with open(path, "rb") as f:
            srv.put(remote_path, f.read())

    # bump files
    tmpdir = os.path.join(opts.tmpdir, "metasync-files")
    sizes  = [1024, 2048, 4192, 8192, 1*MB]
    files  = []
    total_size = 1*MB

    print tmpdir

    util.mkdirs(tmpdir)
    for size in sizes:
        count = total_size / size
        fl = []
        for i in range(count):
            fn = "file-%s-%s" % (size, i)
            pn = os.path.join(tmpdir, fn)
            if not os.path.exists(pn):
                util.create_random_file(pn, size)
            fl.append(fn)
        files.append(fl)

    from metasyncAPI import Worker, ThreadPool
    from multiprocessing import cpu_count

    pool = ThreadPool(cpu_count())

    # try uploading each file
    result = [["Services"] + files]
    for cls in services.all_services:
        if cls in [services.DiskAPI]:
            continue
        row = [services.slug(cls)]
        srv = cls()
        if srv.exists('/concurrent_upload'):
            srv.rmdir('/concurrent_upload')
        srv.putdir('/concurrent_upload')
        print 'uploading:', row[0]

        for fl in files:
            beg = time.time()
            for f in fl:
                path = os.path.join(tmpdir, f)
                remote_path = '/concurrent_upload/%s' % f
                pool.submit(srv.copy, _put, path, remote_path)
            pool.join()
            end = time.time()
            row.append(end - beg)

        result.append(row)

    # tabularize
    for row in result:
        for e in row:
            print "%s\t" % e,
        print
Code Example #4
File: __init__.py Project: UWNetworksLab/metasync
def cmd_test(metasync, args, opts):
    "quick test (e.g., metasync test {%s})"

    # invoke pdb when failed in testing
    util.install_pdb()

    tmpdir = tempfile.mkdtemp()
    root = os.path.join(tmpdir, "repo")
    util.mkdirs(root)
    metasync = MetaSync(root)

    # opts for sub test routines
    opts.root = root
    opts.tmpdir = tmpdir
    opts.encrypt_key = "testkey" if opts.encrypt else ""

    dbg.info("root: %s" % root)
    dbg.info("args: %s" % args)
    dbg.info("opts: ")
    for (k, v) in vars(opts).iteritems():
        dbg.info("  %-12s = %s" % (k, v))

    alltests = dict(test.get_all_tests())
    if any(case not in alltests for case in args):
        dbg.err("no such a test case: %s" % args)
        alltests["help"](metasync, opts)
        exit(1)

    # print help if no test case is provided
    if len(args) == 0:
        args = ["help"]

    for case in args:
        dbg.info("#R<testing %s#> (%s)" % (case, alltests[case].__doc__))
        alltests[case](metasync, opts)

    # poorman's tree
    def tree(path):
        for root, dirs, files in os.walk(path):
            base = os.path.basename(root)
            idnt = '    ' * (root.replace(path, '').count(os.sep))
            print('%s%s/' % (idnt, base))
            for f in files:
                pn = os.path.join(root, f)
                print('    %s%s [%s]' % (idnt, f, os.stat(pn).st_size))

                # dump some content of blobs
                if opts.dump and "objects" == base:
                    print(util.hexdump(util.read_file(pn, 32*2)))
                    print

    # dump root
    if not opts.notree:
        tree(tmpdir)

    # cleanup tmpdir
    if not opts.keep:
        shutil.rmtree(tmpdir)
Code Example #5
File: test.py Project: UWNetworksLab/metasync
def test_bench_upload(metasync, opts):
    "bencmark upload speed of storage services"

    # bump files
    tmpdir = os.path.join(opts.tmpdir, "metasync-files")
    sizes  = [1024, 2048, 1*MB]
    files  = []

    # for real bench
    if opts.slow:
        sizes = [10*MB, 100*MB]

    util.mkdirs(tmpdir)
    for size in sizes:
        fn = "file-%s" % size
        pn = os.path.join(tmpdir, fn)
        if not os.path.exists(pn):
            util.create_random_file(pn, size)
        files.append(fn)

    # try uploading each file
    result = [["Services"] + files]
    for cls in services.all_services:
        if cls in [services.DiskAPI]:
            continue
        if opts.slow and cls in [services.BaiduAPI]:
            continue
        row = [services.slug(cls)]
        srv = cls()
        print 'uploading:', row[0]

        if srv.exists('/upload_test'):
            srv.rmdir('/upload_test')
        srv.putdir('/upload_test')

        for f in files:
            #if row[0] == 'baidu' and f == 'file-104857600':
            #    continue
            content = open(os.path.join(tmpdir, f), 'r').read()
            beg = time.time()
            srv.put('/upload_test/' + f, content)
            end = time.time()
            row.append(end - beg)

        result.append(row)

    # tabularize
    for row in result:
        for e in row:
            print "%s\t" % e,
        print
Code Example #6
File: disk_api.py Project: UWNetworksLab/metasync
 def append(self, path, msg):
     import portalocker
     import time
     pn = self.get_path(path)
     util.mkdirs(os.path.dirname(pn))
     with open(pn, "a+") as log:
         while True:
             try:
                 portalocker.lock(log, portalocker.LOCK_EX)
                 break
             except:
                 dbg.dbg("lock failed")
                 time.sleep(0.1)
         log.write("%d\t%s\n" % (util.current_sec(), msg))
Code Example #7
File: test.py Project: UWNetworksLab/metasync
def test_checkin_dir(metasync, opts):
    "test checkin with directory"

    test_init(metasync, opts)

    dst = os.path.join(metasync.path_root, "a/b")
    util.mkdirs(dst)
    pn = os.path.join(dst, "test-1024")
    util.create_random_file(pn, 1024)

    dst = os.path.join(metasync.path_root, "a")

    metasync.cmd_checkin(dst)
    metasync.cmd_push()

    test_clone(metasync, opts, False)
Code Example #8
File: metasyncAPI.py Project: UWNetworksLab/metasync
 def _file_create(blob, pn): 
     if(blob.thv == "D" or blob.thv == "M"):
         util.mkdirs(pn)
         for i in blob.entries:
             _file_create(blob[i], os.path.join(pn, i))
     elif(blob.thv == "F"):
         content = blob.read()
         util.write_file(pn, content.getvalue())
         content.close()
         # touch metadata blob (for cmd_status)
         os.utime(os.path.join(self.path_objs, blob.hv), None)
     elif(blob.thv == "m"):
         content = blob.read()
         util.write_file(pn, content)
         # touch metadata blob (for cmd_status)
         os.utime(os.path.join(self.path_objs, blob.hv), None)
     else:
         assert False
Code Example #9
File: test.py Project: UWNetworksLab/metasync
def test_blob_load(metasync, opts):
    "test loading file/dir from a path"

    _init_disk_metasync(metasync, opts)

    bs = blobs.BlobStore2(metasync)

    # /a/b/c
    dirp = metasync.get_local_path("a", "b", "c")
    util.mkdirs(dirp)

    # /a/b/c/file
    pn = os.path.join(dirp, "file")
    util.create_random_file(pn, 5*KB)

    blob = bs.load_dir(dirp)
    blob.add("file", bs.load_file(pn))

    # count how many blobs
    root = bs.get_root_blob()
    dbg.dbg("%-15s: %s" % ("/", root.hv))

    cnt = 0
    for (name, blob) in bs.walk():
        dbg.dbg("%-15s: %s" % (name, blob.hv))
        cnt += 1

    assert cnt == len(["a", "b", "c", "file"])

    # flush all new blobs
    assert len(os.listdir(metasync.path_objs)) == 0
    root.store()
    assert len(os.listdir(metasync.path_objs)) == 6

    # "." => root
    test_blob = bs.load_dir(metasync.get_local_path("."))
    assert test_blob == root

    test_blob = bs.load_dir(metasync.get_local_path(""))
    assert test_blob == root
Code Example #10
File: test.py Project: UWNetworksLab/metasync
def test_rm(metasync, opts):
    "test rm file"

    _init_disk_metasync(metasync, opts, 3, 2, opts.encrypt_key)

    # create/commit some files
    size = 512
    for i in range(5):
        pn = os.path.join(opts.root, "file-%s-%s" % (size, i))
        util.create_random_file(pn, size)
        metasync.cmd_checkin(pn)

    pn = os.path.join(opts.root, "a/b")
    util.mkdirs(pn)
    metasync.cmd_checkin(pn)
    metasync.cmd_push()

    pn = os.path.join(opts.root, "a/b/e")
    util.mkdirs(pn)

    # try to remove a non-existent directory
    pn = os.path.join(opts.root, "a/b/c/d")
    assert not metasync.cmd_rm(pn)

    pn = os.path.join(opts.root, "a/b/e/f")
    assert not metasync.cmd_rm(pn)

    # try to remove a non-existent file

    for i in range(3):
        pn = os.path.join(opts.root, "file-%s-%s" % (size, i))
        metasync.cmd_rm(pn)
        assert not os.path.exists(pn)

    metasync.cmd_rm(os.path.join(opts.root,"a/b"))

    metasync.cmd_push()
Code Example #11
def process(imageDir, outputDir):
    """Crop images using DBSCAN"""
    files = api.getFiles(imageDir)
    total = len(files)

    pointsDir = os.path.join(outputDir, "points")
    cluserDir = os.path.join(outputDir, "cluster")
    cropDir = os.path.join(outputDir, "crop")
    util.mkdirs([cluserDir, cropDir, pointsDir])

    marginInfo = {}
    for i, f in enumerate(files):
        basename = os.path.basename(f)
        print("[info] crop image, process {} / {}: {}".format(
            i + 1, total, basename))
        img = cv2.imread(f)

        cropImg = moveMargin(img, (45, -45, 45, -45))  # remove the border features

        maxMargin = None
        method = ["SIFT", "SURF", "ORB"]
        mergePoints = []
        for m in method:
            img = copy.deepcopy(cropImg)
            curpointsDir = os.path.join(pointsDir, m)
            curcluserDir = os.path.join(cluserDir, m)
            curcropDir = os.path.join(cropDir, m)
            util.mkdirs([curcluserDir, curcropDir, curpointsDir])

            points = getROIPoint(img, f=m)  # get ROI points

            # save the ROI points
            pointsSavePath = os.path.join(curpointsDir, basename)
            savePointsImg(pointsSavePath, points, img.shape)

            try:
                maxCluster = getMaxCluster(points, 40)  # get the largest cluster
            except:
                print("[error] skip: {}".format(basename))
                continue
            mergePoints.extend(maxCluster)  # merge the feature points

            # save the keypoint image
            clusterSavePath = os.path.join(curcluserDir, basename)
            savePointsImg(clusterSavePath, maxCluster, img.shape)

            margin = getMargin(maxCluster)  # get the boundary points u, r, d, l
            if not maxMargin:
                maxMargin = margin
            else:
                u, r, d, l = margin
                maxMargin[0] = min(maxMargin[0], u)
                maxMargin[1] = max(maxMargin[1], r)
                maxMargin[2] = max(maxMargin[2], d)
                maxMargin[3] = min(maxMargin[3], l)

            retImg = cropImage(img, margin)  # crop the image

            # save the crop result
            cropSavePath = os.path.join(curcropDir, basename)
            cv2.imwrite(cropSavePath, retImg)

        # maximum boundary obtained from the three methods
        h, w = img.shape[0], img.shape[1]
        if maxMargin[0] - up < 0:
            maxMargin[0] = 0
        else:
            maxMargin[0] -= up

        if maxMargin[1] + right > w:
            maxMargin[1] = w - 1
        else:
            maxMargin[1] += right
        # no compensation for the bottom edge of the image
        if maxMargin[3] - left < 0:
            maxMargin[3] = 0
        else:
            maxMargin[3] -= left

        marginInfo[basename] = maxMargin  # record the crop margin info
        retImg = cropImage(img, maxMargin)  # crop the image

        # save the merged feature-point image
        mergeSavePath = os.path.join(pointsDir,
                                     "mergePoints_{}".format(basename))
        savePointsImg(mergeSavePath, mergePoints, img.shape)

        # save the crop result
        cropSavePath = os.path.join(cropDir, basename)
        cv2.imwrite(cropSavePath, retImg)

        # if i == 5: assert False, 'break'

    writeMarginInfo(os.path.join(outputDir, "marginInfo.txt"),
                    marginInfo)  # save to the output directory

    os.startfile(outputDir)
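
Unlike the metasync snippets, this example passes util.mkdirs a list of directories (util.mkdirs([cluserDir, cropDir, pointsDir])), so this project's helper evidently accepts multiple paths. A list-accepting variant might look like the sketch below; this is an assumption about the helper's contract, not the project's actual code.

import os

def mkdirs(paths):
    # Hypothetical sketch: accept a single path or a list of paths and
    # create each directory tree, skipping ones that already exist.
    if isinstance(paths, str):
        paths = [paths]
    for p in paths:
        try:
            os.makedirs(p)
        except OSError:
            if not os.path.isdir(p):
                raise
    return paths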
Code Example #12
def main():
    #### options
    parser = argparse.ArgumentParser()
    parser.add_argument('-opt', type=str, help='Path to option YMAL file.')
    parser.add_argument('--launcher', choices=['none', 'pytorch'], default='none',
                        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    args = parser.parse_args()
    opt = option.parse(args.opt, is_train=True)

    #### distributed training settings
    if args.launcher == 'none':  # disabled distributed training
        opt['dist'] = False
        rank = -1
        print('Disabled distributed training.')
    else:
        opt['dist'] = True
        init_dist()
        world_size = torch.distributed.get_world_size()
        rank = torch.distributed.get_rank()

    #### loading resume state if exists
    if opt['path'].get('resume_state', None):
        # distributed resuming: all load into default GPU
        device_id = torch.cuda.current_device()
        resume_state = torch.load(opt['path']['resume_state'],
                                  map_location=lambda storage, loc: storage.cuda(device_id))
        option.check_resume(opt, resume_state['iter'])  # check resume options
    else:
        resume_state = None

    #### mkdir and loggers
    if rank <= 0:  # normal training (rank -1) OR distributed training (rank 0)
        if resume_state is None:
            print(opt['path'])
            util.mkdir_and_rename(
                opt['path']['experiments_root'])  # rename experiment folder if exists
            util.mkdirs((path for key, path in opt['path'].items() if not key == 'experiments_root'
                         and 'pretrain_model' not in key and 'resume' not in key and path is not None))

        # config loggers. Before it, the log will not work
        util.setup_logger('base', opt['path']['log'], 'train_' + opt['name'], level=logging.INFO,
                          screen=True, tofile=True)
        util.setup_logger('val', opt['path']['log'], 'val_' + opt['name'], level=logging.INFO,
                          screen=True, tofile=True)
        logger = logging.getLogger('base')
        logger.info(option.dict2str(opt))
        # tensorboard logger
        if opt['use_tb_logger'] and 'debug' not in opt['name']:
            version = float(torch.__version__[0:3])
            if version >= 1.1:  # PyTorch 1.1
                from torch.utils.tensorboard import SummaryWriter
            else:
                logger.info(
                    'You are using PyTorch {}. Tensorboard will use [tensorboardX]'.format(version))
                from tensorboardX import SummaryWriter
            trial = 0
            while os.path.isdir('../Loggers/' + opt['name'] + '/' + str(trial)):
                trial += 1
            tb_logger = SummaryWriter(log_dir='../Loggers/' + opt['name'] + '/' + str(trial))
    else:
        util.setup_logger('base', opt['path']['log'], 'train', level=logging.INFO, screen=True)
        logger = logging.getLogger('base')

    # convert to NoneDict, which returns None for missing keys
    opt = option.dict_to_nonedict(opt)

    # -------------------------------------------- ADDED --------------------------------------------
    l1_loss = torch.nn.L1Loss()
    mse_loss = torch.nn.MSELoss()
    calc_lpips = PerceptualLossLPIPS()
    if torch.cuda.is_available():
        l1_loss = l1_loss.cuda()
        mse_loss = mse_loss.cuda()
    # -----------------------------------------------------------------------------------------------

    #### random seed
    seed = opt['train']['manual_seed']
    if seed is None:
        seed = random.randint(1, 10000)
    if rank <= 0:
        logger.info('Random seed: {}'.format(seed))
    util.set_random_seed(seed)

    torch.backends.cudnn.benchmark = True
    # torch.backends.cudnn.deterministic = True

    #### create train and val dataloader
    dataset_ratio = 200  # enlarge the size of each epoch
    for phase, dataset_opt in opt['datasets'].items():
        if phase == 'train':
            train_set = create_dataset(dataset_opt)
            train_size = int(math.ceil(len(train_set) / dataset_opt['batch_size']))
            total_iters = int(opt['train']['niter'])
            total_epochs = int(math.ceil(total_iters / train_size))
            if opt['dist']:
                train_sampler = DistIterSampler(train_set, world_size, rank, dataset_ratio)
                total_epochs = int(math.ceil(total_iters / (train_size * dataset_ratio)))
            else:
                train_sampler = None
            train_loader = create_dataloader(train_set, dataset_opt, opt, train_sampler)
            if rank <= 0:
                logger.info('Number of train images: {:,d}, iters: {:,d}'.format(
                    len(train_set), train_size))
                logger.info('Total epochs needed: {:d} for iters {:,d}'.format(
                    total_epochs, total_iters))
        elif phase == 'val':
            val_set = create_dataset(dataset_opt)
            val_loader = create_dataloader(val_set, dataset_opt, opt, None)
            if rank <= 0:
                logger.info('Number of val images in [{:s}]: {:d}'.format(
                    dataset_opt['name'], len(val_set)))
        else:
            raise NotImplementedError('Phase [{:s}] is not recognized.'.format(phase))
    assert train_loader is not None

    #### create model
    model = Model(opt)

    #### resume training
    if resume_state:
        logger.info('Resuming training from epoch: {}, iter: {}.'.format(
            resume_state['epoch'], resume_state['iter']))

        start_epoch = resume_state['epoch']
        current_step = resume_state['iter']
        model.resume_training(resume_state)  # handle optimizers and schedulers
    else:
        current_step = 0
        start_epoch = 0

    #### training
    logger.info('Start training from epoch: {:d}, iter: {:d}'.format(start_epoch, current_step))
    for epoch in range(start_epoch, total_epochs + 1):
        if opt['dist']:
            train_sampler.set_epoch(epoch)
        train_bar = tqdm(train_loader, desc='[%d/%d]' % (epoch, total_epochs))
        for bus, train_data in enumerate(train_bar):

             # validation
            if epoch % opt['train']['val_freq'] == 0 and bus == 0 and rank <= 0:
                avg_ssim = avg_psnr = avg_lpips = val_pix_err_f = val_pix_err_nf = val_mean_color_err = 0.0
                print("into validation!")
                idx = 0
                val_bar = tqdm(val_loader, desc='[%d/%d]' % (epoch, total_epochs))
                for val_data in val_bar:
                    idx += 1
                    img_name = os.path.splitext(os.path.basename(val_data['LQ_path'][0]))[0]
                    img_dir = os.path.join(opt['path']['val_images'], img_name)
                    util.mkdir(img_dir)

                    model.feed_data(val_data)
                    model.test()

                    visuals = model.get_current_visuals()
                    sr_img = util.tensor2img(visuals['SR'])  # uint8
                    gt_img = util.tensor2img(visuals['GT'])  # uint8
                    lq_img = util.tensor2img(visuals['LQ'])  # uint8
                    #nr_img = util.tensor2img(visuals['NR'])  # uint8
                    #nf_img = util.tensor2img(visuals['NF'])  # uint8
                    #nh_img = util.tensor2img(visuals['NH'])  # uint8


                    #print("Great! images got into here.")

                    # Save SR images for reference
                    save_sr_img_path = os.path.join(img_dir,
                                                 '{:s}_{:d}_sr.png'.format(img_name, current_step))
                    save_nr_img_path = os.path.join(img_dir,
                                                 '{:s}_{:d}_lq.png'.format(img_name, current_step))
                    #save_nf_img_path = os.path.join(img_dir,
                                                # 'bs_{:s}_{:d}_nr.png'.format(img_name, current_step)) 
                    #save_nh_img_path = os.path.join(img_dir,
                                                # 'bs_{:s}_{:d}_nh.png'.format(img_name, current_step)) 
                    util.save_img(sr_img, save_sr_img_path)
                    util.save_img(lq_img, save_nr_img_path)
                    #util.save_img(nf_img, save_nf_img_path)
                    #util.save_img(nh_img, save_nh_img_path)


                    #print("Saved")
                    # calculate PSNR
                    gt_img = gt_img / 255.
                    sr_img = sr_img / 255.
                    #nf_img = nf_img / 255.
                    lq_img = lq_img / 255.
                    #cropped_lq_img = lq_img[crop_size:-crop_size, crop_size:-crop_size, :]
                    #cropped_nr_img = nr_img[crop_size:-crop_size, crop_size:-crop_size, :]
                    avg_psnr += util.calculate_psnr(sr_img * 255, gt_img * 255)
                    avg_ssim += util.calculate_ssim(sr_img * 255, gt_img * 255)
                    avg_lpips += calc_lpips(visuals['SR'], visuals['GT'])
                    #avg_psnr_n += util.calculate_psnr(cropped_lq_img * 255, cropped_nr_img * 255)

                    # ----------------------------------------- ADDED -----------------------------------------
                    val_pix_err_nf += l1_loss(visuals['SR'], visuals['GT'])
                    val_mean_color_err += mse_loss(visuals['SR'].mean(2).mean(1), visuals['GT'].mean(2).mean(1))
                    # -----------------------------------------------------------------------------------------
                
                
                avg_psnr = avg_psnr / idx
                avg_ssim = avg_ssim / idx
                avg_lpips = avg_lpips / idx
                val_pix_err_f /= idx
                val_pix_err_nf /= idx
                val_mean_color_err /= idx



                # log
                logger.info('# Validation # PSNR: {:.4e},'.format(avg_psnr))
                logger.info('# Validation # SSIM: {:.4e},'.format(avg_ssim))
                logger.info('# Validation # LPIPS: {:.4e},'.format(avg_lpips))
                logger_val = logging.getLogger('val')  # validation logger
                logger_val.info('<epoch:{:3d}, iter:{:8,d}> psnr: {:.4e} ssim: {:.4e} lpips: {:.4e}'.format(
                    epoch, current_step, avg_psnr, avg_ssim, avg_lpips))
                # tensorboard logger
                if opt['use_tb_logger'] and 'debug' not in opt['name']:
                    tb_logger.add_scalar('val_psnr', avg_psnr, current_step)
                    tb_logger.add_scalar('val_ssim', avg_ssim, current_step)
                    tb_logger.add_scalar('val_lpips', avg_lpips, current_step)
                    tb_logger.add_scalar('val_pix_err_nf', val_pix_err_nf, current_step)
                    tb_logger.add_scalar('val_mean_color_err', val_mean_color_err, current_step)

            current_step += 1
            if current_step > total_iters:
                break
            #### update learning rate
            model.update_learning_rate(current_step, warmup_iter=opt['train']['warmup_iter'])

            #### training
            model.feed_data(train_data)
            model.optimize_parameters(current_step)
            model.clear_data()
            #### tb_logger
            if current_step % opt['logger']['tb_freq'] == 0:
                logs = model.get_current_log()
                if opt['use_tb_logger'] and 'debug' not in opt['name']:
                    for k, v in logs.items():
                        if rank <= 0:
                            tb_logger.add_scalar(k, v, current_step)

            
            #### logger
            if epoch % opt['logger']['print_freq'] == 0  and epoch != 0 and bus == 0:
                logs = model.get_current_log()
                message = '<epoch:{:3d}, iter:{:8,d}, lr:{:.3e}> '.format(
                    epoch, current_step, model.get_current_learning_rate())
                for k, v in logs.items():
                    message += '{:s}: {:.4e} '.format(k, v)
                if rank <= 0:
                    logger.info(message)

           
            #### save models and training states
            if epoch % opt['logger']['save_checkpoint_freq'] == 0 and epoch != 0 and bus == 0:
                if rank <= 0:
                    logger.info('Saving models and training states.')
                    model.save(current_step)
                    model.save_training_state(epoch, current_step)

    if rank <= 0:
        logger.info('Saving the final model.')
        model.save('latest')
        logger.info('End of training.')
Code Example #13
# UResNet
if network_type == 'UResNet':
	model = UResNet()
	alpha_list = [0.445, 0.275, 0.13]
	beta_list = [0.15, 0., 0.]
# RectNet
elif network_type == 'RectNet':
	model = RectNet()
	alpha_list = [0.535, 0.272]
	beta_list = [0.134, 0.068,]
else:
	assert False, 'Unsupported network type'

# Make the checkpoint directory
mkdirs(checkpoint_dir)


# -------------------------------------------------------
# Set up the training routine
network = nn.DataParallel(
	model.float(),
	device_ids=device_ids).to(device)


#loading the model

checkpoint = torch.load(checkpoint_path)
util.load_partial_model(network, checkpoint['state_dict'])

# running inference
Code Example #14
import sys
import time

import tweepy
from tweepy import OAuthHandler

from util import mkdirs

consumer_key = 'CDq4zm9Gu5A07KakkYyIviMYn'
consumer_secret = 'PVUgtdj7FC6xkKPMRBZ1cjB8xQ86EcCWFLiQJP2UUaReFJSqA9'
access_token = '7939525151602774016-wmIukaGwjopWCz4ebAvGjmJD9I4YbJA'
access_secret = 'SNHNLDftRySr5gln7sbANjIJzF7wTEP2k4xqe71Z2CYZi'

auth = OAuthHandler(consumer_key=consumer_key, consumer_secret=consumer_secret)
auth.set_access_token(access_token, access_secret)
api = tweepy.API(auth)

#target_name = 'alibaba'
#extra_key_words = ['alibaba']
target_name = sys.argv[1]
extra_key_words = [sys.argv[1]]
file_path = target_name + '/' + time.strftime('%Y-%m-%d-%M',
                                              time.localtime(time.time()))
mkdirs(file_path)

max_text_count = 500
current_text_count = 0


def process_full_text(full_text):
    global current_text_count

    # skip short text
    size = len(full_text)
    if size <= 2:
        return

    # clear url
    full_text = clear_url(full_text)
Code Example #15
File: train.py Project: mkxmkx/experiment_backup
from datetime import datetime

if __name__ == "__main__":
    config = util.initialize_from_env()

    report_frequency = config["report_frequency"]
    eval_frequency = config["eval_frequency"]

    # cluster_config = config["cluster"]
    # util.set_gpus(*cluster_config["gpus"])

    model = cm.CorefModel(config)
    saver = tf.train.Saver()

    TIMESTAMP = "{0:%Y-%m-%dT%H-%M-%S/}".format(datetime.now())
    log_dir = util.mkdirs(os.path.join(config["log_dir"], "Global_warming"))
    writer = tf.summary.FileWriter(log_dir, flush_secs=20)

    max_f1 = 0
    max_presission = 0
    max_recall = 0
    max_accuracy = 0
    coord = tf.train.Coordinator()
    with tf.Session() as session:

        # session = tf_debug.LocalCLIDebugWrapperSession(session)
        # session.add_tensor_filter("has_inf_or_nan", tf_debug.has_inf_or_nan)

        #session.run(tf.initialize_all_variables())
        session.run(tf.global_variables_initializer())
        model.start_enqueue_thread(session)
Code Example #16
def genDiffPicByThresholdValue(imgDir, outputDir):
	files = api.getFiles(imgDir)

	util.mkdirs(outputDir)
Code Example #17

if shutil.which("gradle") is None:
    print("Gradle not installed")
    exit(1)

gradleLines = []
buildInfo = util.parse_build_info()
with open(args.template, 'r') as templateFile:
    gradleLines = templateFile.readlines()
gradleLines = buildInfo.rewrite_variables(gradleLines)
with open(gradleBuildFile, "w+") as gradleFile:
    gradleFile.writelines(gradleLines)

# Fake patches dir
util.mkdirs(os.path.join(args.gradle, 'patches'))

gradleDecompiledDir = os.path.join(args.gradle, "projects", "Clean", "src", "main", "java")
if args.force or not os.path.exists(os.path.join(gradleDecompiledDir, "net", "minecraft")):
    print("Tricking ForgeGradle into decompiling minecraft")
    gradle_log = open("gradle.log", "w+")
    result = subprocess.Popen(["gradle", "clean", "setup"], universal_newlines=True,
                                  stdout=gradle_log, stderr=gradle_log, cwd=args.gradle).wait()
    if result != 0:
        print("Unable to decompile minecraft")
        print("See gradle.log for more info")
        exit(1)
    print("Successfully Decompiled minecraft")
else:
    print("Using cached sources")
Code Example #18
File: test_single.py Project: qq547276542/e2e-coref
import coref_model as cm
import util

if __name__ == "__main__":
  if "GPU" in os.environ:
    util.set_gpus(int(os.environ["GPU"]))
  else:
    util.set_gpus()

  if len(sys.argv) > 1:
    name = sys.argv[1]
    print("Running experiment: {} (from command-line argument).".format(name))
  else:
    name = os.environ["EXP"]
    print("Running experiment: {} (from environment variable).".format(name))

  config = util.get_config("experiments.conf")[name]
  config["log_dir"] = util.mkdirs(os.path.join(config["log_root"], name))

  util.print_config(config)
  model = cm.CorefModel(config)

  saver = tf.train.Saver()
  log_dir = config["log_dir"]

  with tf.Session() as session:
    checkpoint_path = os.path.join(log_dir, "model.max.ckpt")
    print("Evaluating {}".format(checkpoint_path))
    saver.restore(session, checkpoint_path)
    model.evaluate(session, official_stdout=True)
Code Example #19
def test(options):

    # print variable names
    for v in tf.trainable_variables():
        print(v.name)
        print(v.get_shape())

    print('Loading data ...')
    data_provision = DataProvision(options)

    batch_size = 400
    split = 'test'

    test_batch_generator = data_provision.iterate_batch(split, batch_size)
    unique_anno_ids = data_provision.get_ids(split)
    anchors = data_provision.get_anchors()
    grounding = data_provision.get_grounding(split)

    print('Start to predict ...')
    t0 = time.time()

    count = 0

    # output data, for evaluation
    out_data = {}
    out_data['results'] = {}
    results = {}

    for batch_data in test_batch_generator:
        video_feats = batch_data['video_feat']
        video_feat_mask = batch_data['video_feat_mask']
        feat_lens = np.sum(video_feat_mask, axis=-1)
        this_batch_size = video_feat_mask.shape[0]

        for sample_id in range(this_batch_size):
            unique_anno_id = unique_anno_ids[count]
            feat_len = feat_lens[sample_id]
            # small gap (in seconds) due to feature resolution
            gap = 0.5

            print('%d-th video-query: %s, feat_len: %d' %
                  (count, unique_anno_id, feat_len))

            result = []
            scores = np.random.random(size=(feat_len, options['num_anchors']))
            for i in range(feat_len):
                for j in range(options['num_anchors']):
                    # calculate time stamp from feature id
                    end_feat = i + 0.5
                    start_feat = end_feat - anchors[j]
                    end_time = options['feature_to_second'] * end_feat
                    start_time = options['feature_to_second'] * start_feat

                    if start_time < 0. - gap:
                        continue

                    start_time = max(0., start_time)

                    result.append({
                        'timestamp': [start_time, end_time],
                        'score': scores[i, j]
                    })

            print('Number of proposals (before post-processing): %d' %
                  len(result))

            result = sorted(result, key=lambda x: x['score'], reverse=True)

            # non-maximum suppression
            result = nms_detections(result, overlap=options['nms_threshold'])
            print('Number of proposals (after nms): %d' % len(result))

            result = sorted(result, key=lambda x: x['score'], reverse=True)
            result = result[:10]

            print('#{}, {}'.format(count, unique_anno_id))
            print('sentence query:')
            sentence_query = grounding[unique_anno_id]['raw_sentence']
            print(sentence_query)
            print('result (top 10):')
            print(result)
            print('groundtruth:')
            print(grounding[unique_anno_id]['timestamp'])

            results[unique_anno_id] = result

            count = count + 1

    out_data['results'] = results

    out_json_file = 'results/random_anchor_predict_proposals_%s_nms_%.2f.json' % (
        split, options['nms_threshold'])

    mkdirs(os.path.dirname(out_json_file))

    print('Writing result json file ...')
    with open(out_json_file, 'w') as fid:
        json.dump(out_data, fid)

    print('Evaluating ...')
    recall_at_k = get_recall_at_k(results, grounding, options['tiou_measure'],
                                  options['max_proposal_num'])

    print('Recall at {}: {}'.format(options['max_proposal_num'], recall_at_k))

    print('Total running time: %f seconds.' % (time.time() - t0))
Code Example #20
#!/usr/bin/env python

import sys
import subprocess as sp

import util

def screen(py_script, name, args):
  bash = "source ~/.bashrc; python {} {}; exec bash".format(py_script, " ".join(str(a) for a in args))
  command = ["screen", "-dmS", name, "bash", "-c", bash]
  print(" ".join(command))
  sp.call(command)

if __name__ == "__main__":
  exp_name = sys.argv[1]
  util.mkdirs("logs")
  cluster_config = util.get_config("experiments.conf")[exp_name]["cluster"]
  screen("parameter_server.py", "ps", [exp_name])
  screen("evaluator.py", "eval", [exp_name])
  for i, _ in enumerate(cluster_config["addresses"]["worker"]):
    screen("worker.py", "w{}".format(i), [exp_name, i])
Code Example #21
File: disk_api.py Project: UWNetworksLab/metasync
 def putdir(self, path):
     pn = self.get_path(path)
     util.mkdirs(pn)
Code Example #22
File: train.py Project: mkxmkx/experiment_backup
if __name__ == "__main__":
    config = util.initialize_from_env()

    report_frequency = config["report_frequency"]
    eval_frequency = config["eval_frequency"]

    # cluster_config = config["cluster"]
    # util.set_gpus(*cluster_config["gpus"])

    model = cm.CorefModel(config)
    saver = tf.train.Saver()

    TIMESTAMP = "{0:%Y-%m-%dT%H-%M-%S/}".format(datetime.now())
    log_dir = util.mkdirs(
        os.path.join(
            config["log_dir"],
            "v1_PublicKeyCryptography_lambda:" + str(config["lambda"])))
    writer = tf.summary.FileWriter(log_dir, flush_secs=20)

    max_f1 = 0
    max_presission = 0
    max_recall = 0
    max_accuracy = 0
    coord = tf.train.Coordinator()
    with tf.Session() as session:

        # session = tf_debug.LocalCLIDebugWrapperSession(session)
        # session.add_tensor_filter("has_inf_or_nan", tf_debug.has_inf_or_nan)

        #session.run(tf.initialize_all_variables())
        session.run(tf.global_variables_initializer())
Code Example #23
File: disk_api.py Project: dstarikov/metavault
 def update(self, path, content):
     dbg.api('put:' + self.root + ',' + path)
     pn = self.get_path(path)
     util.mkdirs(os.path.dirname(pn))
     return util.write_file(pn, content)
Code Example #24
File: disk_api.py Project: dstarikov/metavault
 def putdir(self, path):
     pn = self.get_path(path)
     util.mkdirs(pn)
Code Example #25
File: disk_api.py Project: dstarikov/metavault
 def __init__(self, root):
     self.root = root
     util.mkdirs(root)
     self._sid = util.md5(self.root) % 10000
Code Example #26
import os, sys, logging, logging.handlers
from util import mkdirs


#########################################
# Setup Logging
#########################################

LOG_FILENAME = sys.path[0] + '/log/change-merge-hook-output.log'
mkdirs(LOG_FILENAME)

formatter = logging.Formatter("%(asctime)s %(levelname)s %(name)s >> %(message)s")

# Add the log message handler to the logger
fileHandler = logging.handlers.RotatingFileHandler(
              LOG_FILENAME, maxBytes=2400000, backupCount=5)
fileHandler.setFormatter(formatter)

consoleHandler = logging.StreamHandler()
consoleHandler.setLevel(logging.DEBUG)
consoleHandler.setFormatter(formatter)

# Set up a specific logger with our desired output level
def getLogger(loggerName, level=logging.DEBUG):
    logger = logging.getLogger(loggerName)
    logger.setLevel(level)
    logger.addHandler(consoleHandler)
    logger.addHandler(fileHandler)
    return logger
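
Note that this snippet hands mkdirs the log file path itself (LOG_FILENAME), which suggests the helper in this project ensures the containing directory exists rather than creating a directory named after the file. A sketch of that variant, purely as an assumption:

import os

def mkdirs(file_path):
    # Hypothetical variant: make sure the directory that will contain
    # file_path exists, so a file handler can later open it for writing.
    directory = os.path.dirname(file_path)
    if directory and not os.path.isdir(directory):
        os.makedirs(directory)
    return file_path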

Code Example #27
File: write_single.py Project: TigranGalstyan/SciERC
    #if "GPU" in os.environ:
    #  util.set_gpus(int(os.environ["GPU"]))
    #else:
    #  util.set_gpus()

    if len(sys.argv) > 1:
        name = sys.argv[1]
        print("Running experiment: {} (from command-line argument).".format(
            name))
    else:
        name = os.environ["EXP"]
        print(
            "Running experiment: {} (from environment variable).".format(name))

    config = util.get_config("experiments.conf")[name]
    config["log_dir"] = util.mkdirs(os.path.join(config["log_root"], name))

    config["batch_size"] = -1
    config["max_tokens_per_batch"] = -1

    # Use dev lm, if provided.
    if config["lm_path"] and "lm_path_dev" in config and config["lm_path_dev"]:
        config["lm_path"] = config["lm_path_dev"]

    util.print_config(config)
    data = LSGNData(config)
    model = SRLModel(data, config)
    evaluator = LSGNEvaluator(config)

    variables_to_restore = []
    for var in tf.global_variables():
Code Example #28
File: disk_api.py Project: UWNetworksLab/metasync
 def update(self, path, content):
     dbg.api('put:' + self.root +',' + path)
     pn = self.get_path(path)
     util.mkdirs(os.path.dirname(pn))
     return util.write_file(pn, content)
Code Example #29
import os
import shutil
import sys

from os.path import abspath
from subprocess import check_call
from util import projectDir, rmpath, mkdirs

sys.platform == 'linux' or sys.exit('error: script only runs on Linux')
shutil.which('git')     or sys.exit('error: git missing')
shutil.which('make')    or sys.exit('error: make missing')
shutil.which('tar')     or sys.exit('error: tar missing')

buildDir = os.path.join(projectDir, 'out', 'build-backend')
artifactDir = os.path.join(projectDir, 'out', 'artifact')

rmpath(buildDir)
mkdirs(buildDir)
mkdirs(artifactDir)

os.chdir(buildDir)

prebuiltUrl = 'https://github.com/rprichard/x86_64-linux-glibc2.15-4.8.git'
prebuiltPath = abspath('linux-prebuilt')
artifactPath = os.path.join(artifactDir, 'backend.tar.gz')

check_call(['git', 'clone', prebuiltUrl, prebuiltPath])

os.putenv('CXX',   os.path.join(prebuiltPath, 'bin', 'x86_64-linux-g++'))
os.putenv('STRIP', os.path.join(prebuiltPath, 'bin', 'x86_64-linux-strip'))
check_call(['make', 'clean'], cwd=os.path.join(projectDir, 'backend'))
check_call(['make'],          cwd=os.path.join(projectDir, 'backend'))
check_call(['tar', 'cfa', artifactPath, '-C', '..', 'wslbridge-backend'])
Code Example #30
File: disk_api.py Project: UWNetworksLab/metasync
 def __init__(self, root):
     self.root = root
     util.mkdirs(root)
     self._sid  = util.md5(self.root) % 10000
Code Example #31
def process(imageDir, outputDir):
    """Crop images using DBSCAN"""
    files = api.getFiles(imageDir)
    total = len(files)

    pointsDir = os.path.join(outputDir, "points")
    cluserDir = os.path.join(outputDir, "cluster")
    cropDir = os.path.join(outputDir, "crop")
    util.mkdirs([cluserDir, cropDir, pointsDir])

    marginInfo = {}
    for i, f in enumerate(files):
        basename = os.path.basename(f)
        print("process {} / {}: {}".format(i+1, total, basename))
        img = cv2.imread(f)

        maxMargin = None
        method = ["SIFT", "SURF", "ORB"]
        for m in method:
            curpointsDir = os.path.join(pointsDir, m)
            curcluserDir = os.path.join(cluserDir, m)
            curcropDir = os.path.join(cropDir, m)
            util.mkdirs([curcluserDir, curcropDir, curpointsDir])


            points = getROIPoint(img)               # get ROI points

            # save the ROI points
            pointsSavePath = os.path.join(curpointsDir, basename)
            savePointsImg(pointsSavePath, points, img.shape)


            maxCluster = getMaxCluster(points, 40)  # get the largest cluster

            # save the keypoint image
            clusterSavePath = os.path.join(curcluserDir, basename)
            savePointsImg(clusterSavePath, maxCluster, img.shape)

            margin = getMargin(maxCluster)  # get the boundary points u, r, d, l
            if not maxMargin:
                maxMargin = margin
            else:
                u, r, d, l = margin
                maxMargin[0] = min(maxMargin[0], u) 
                maxMargin[1] = max(maxMargin[1], r) 
                maxMargin[2] = max(maxMargin[2], d) 
                maxMargin[3] = min(maxMargin[3], l) 

            retImg = cropImage(img, margin)  # crop the image

            # save the crop result
            cropSavePath = os.path.join(curcropDir, basename)
            cv2.imwrite(cropSavePath, retImg)

        # maximum boundary obtained from the three methods
        h, w = img.shape[0], img.shape[1]
        print
        if maxMargin[0] - up < 0:
            maxMargin[0] = 0
        else:
            maxMargin[0] -= up

        if maxMargin[1] + right > w:
            maxMargin[1] = w - 1
        else:
            maxMargin[1] += right

        if maxMargin[3] - left < 0:
            maxMargin[3] = 0
        else:
            maxMargin[3] -= left

        marginInfo[basename] = maxMargin    # record the crop margin info
        retImg = cropImage(img, maxMargin)  # crop the image

        # save the crop result
        cropSavePath = os.path.join(cropDir, basename)
        cv2.imwrite(cropSavePath, retImg)

    writeMarginInfo(os.path.join(outputDir, "marginInfo.txt"), marginInfo)   # save to the output directory

    os.startfile(outputDir)