Example #1
def rsync_file(from_path, to_path):
    make_dirs(os.path.dirname(to_path))

    subprocess_cmd = [
        "rsync",
        "--progress",
        "--chmod=D555",
        "--chmod=F444",
        "--times",
        "--copy-links",
        from_path,
        to_path,
    ]

    log.info(" ".join(subprocess_cmd))

    # The following is a way to use the logging module with subprocess.
    # See
    # https://stackoverflow.com/questions/21953835/run-subprocess-and-print-output-to-logging.
    process = Popen(subprocess_cmd, stdout=PIPE, stderr=STDOUT)

    with process.stdout:
        for line in iter(process.stdout.readline, b""):
            log.info(line)

    if os.path.getsize(to_path) != os.path.getsize(from_path):
        log.error("copy failed for %s to %s", from_path, to_path)
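Every snippet on this page leans on a make_dirs helper that is not shown. A minimal sketch, assuming it is simply an idempotent wrapper around os.makedirs that returns the path (a few of the examples below use that return value):

import os

def make_dirs(path):
    # Create the directory (and any missing parents); do nothing if it exists.
    os.makedirs(path, exist_ok=True)
    return path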
Example #2
def BuildVEL(args):

    print("Log CMake version")
    cmake_ver_cmd = 'cmake --version'
    RunShellCmd(cmake_ver_cmd)

    GTEST_DIR = RepoRelative("external/googletest")
    if not os.path.exists(GTEST_DIR):
        print("Clone Testing Framework Source Code")
        clone_gtest_cmd = f'git clone https://github.com/google/googletest.git {GTEST_DIR}'
        RunShellCmd(clone_gtest_cmd)

        print("Get Specified Testing Source")
        gtest_checkout_cmd = 'git checkout tags/release-1.10.0'
        RunShellCmd(gtest_checkout_cmd, GTEST_DIR)

    utils.make_dirs(VEL_BUILD_DIR)
    print("Run CMake for Extension Layer")
    cmake_cmd = f'cmake -DUPDATE_DEPS=ON -DCMAKE_BUILD_TYPE={args.configuration.capitalize()} {args.cmake} ..'
    if IsWindows(): cmake_cmd = cmake_cmd + f' -A {args.arch}'
    RunShellCmd(cmake_cmd, VEL_BUILD_DIR)

    print("Build Extension Layer and Tests")
    build_cmd = f'cmake --build . --config {args.configuration}'
    if not IsWindows(): build_cmd = build_cmd + f' -- -j{os.cpu_count()}'
    RunShellCmd(build_cmd, VEL_BUILD_DIR)
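The Vulkan build examples on this page (BuildVEL, BuildVVL, BuildMockICD) also call helpers such as RunShellCmd, RepoRelative and IsWindows that live elsewhere in their repository. A hedged sketch of plausible stand-ins, shown only so the snippets read as self-contained; the real helpers may differ:

import os
import shlex
import subprocess
import sys

def IsWindows():
    # Assumed: detect a Windows host.
    return sys.platform == 'win32'

def RepoRelative(path):
    # Assumed: resolve a path relative to the repository root,
    # here taken to be the directory containing this script.
    return os.path.abspath(os.path.join(os.path.dirname(__file__), path))

def RunShellCmd(command, cwd='.'):
    # Assumed: split the command string, run it in cwd, raise on failure.
    subprocess.check_call(shlex.split(command), cwd=cwd)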
Example #3
    def save_pred_label(self, predict_label, iter):
        vid_name = None
        if iter == 1:
            vid_name = '보이그룹-안무하'
        elif iter == 2:
            vid_name = '걸그룹-안무하'
        elif iter == 3:
            vid_name = '솔로-안무중'
        elif iter == 4:
            vid_name = '걸그룹-안무중'
        elif iter == 5:
            vid_name = '보이그룹-안무중'

        og_vid_dir = join(self.original_vid_dir, vid_name)
        demo_dir = join(self.demo_dir, vid_name)
        label_dir = join(demo_dir, vid_name + '.csv')
        make_dirs(demo_dir)

        # save csv file
        predict_label = np.array(predict_label)
        with open(label_dir, "a") as output:
            writer = csv.writer(output, lineterminator='\n')
            for i in range(predict_label.shape[0]):
                writer.writerow(predict_label[i])
        # generate demo video
        vid_preprocess(og_vid_dir, demo_dir, predict_label[:, 1], self.in_fps,
                       self.out_fps, self.width, self.height)
Example #4
def BuildVVL(args):

    print("Log CMake version")
    cmake_ver_cmd = 'cmake --version'
    RunShellCmd(cmake_ver_cmd)

    GTEST_DIR = RepoRelative("external/googletest")
    if not os.path.exists(GTEST_DIR):
        print("Clone Testing Framework Source Code")
        clone_gtest_cmd = f'git clone https://github.com/google/googletest.git {GTEST_DIR}'
        RunShellCmd(clone_gtest_cmd)

        print("Get Specified Testing Source")
        gtest_checkout_cmd = 'git checkout tags/release-1.8.1'
        RunShellCmd(gtest_checkout_cmd, GTEST_DIR)

    utils.make_dirs(VVL_BUILD_DIR)
    print("Run CMake for Validation Layers")
    cmake_cmd = f'cmake -DUPDATE_DEPS=ON -DCMAKE_BUILD_TYPE={args.configuration.capitalize()} {args.cmake} ..'
    if IsWindows(): cmake_cmd = cmake_cmd + f' -A {args.arch}'
    RunShellCmd(cmake_cmd, VVL_BUILD_DIR)

    print("Build Validation Layers and Tests")
    build_cmd = f'cmake --build . --config {args.configuration}'
    if not IsWindows(): build_cmd = build_cmd + f' -- -j{os.cpu_count()}'
    RunShellCmd(build_cmd, VVL_BUILD_DIR)

    print('Run vk_validation_stats.py')
    utils.make_dirs(
        os.path.join(VVL_BUILD_DIR, 'layers', args.configuration.capitalize()))
    RunShellCmd(
        f'python3 ../scripts/vk_validation_stats.py ../{EXTERNAL_DIR_NAME}/Vulkan-Headers/registry/validusage.json -text layers/{args.configuration.capitalize()}/vuid_coverage_database.txt',
        VVL_BUILD_DIR)
Example #5
def BuildMockICD(args):
    VT_DIR = RepoRelative("%s/Vulkan-Tools" % EXTERNAL_DIR_NAME)
    if not os.path.exists(VT_DIR):
        print("Clone Vulkan-Tools Repository")
        clone_tools_cmd = 'git clone https://github.com/KhronosGroup/Vulkan-Tools.git'
        RunShellCmd(clone_tools_cmd, EXTERNAL_DIR)

    ICD_BUILD_DIR = RepoRelative("%s/Vulkan-Tools/%s" %
                                 (EXTERNAL_DIR_NAME, BUILD_DIR_NAME))

    print("Running update_deps.py for ICD")
    RunShellCmd(
        f'python3 scripts/update_deps.py --dir {EXTERNAL_DIR_NAME} --config {args.configuration} --arch {args.arch}',
        VT_DIR)

    print("Run CMake for ICD")
    utils.make_dirs(ICD_BUILD_DIR)
    cmake_cmd = \
        f'cmake -DCMAKE_BUILD_TYPE={args.configuration.capitalize()} -DBUILD_CUBE=NO -DBUILD_VULKANINFO=NO -DINSTALL_ICD=OFF -DVULKAN_HEADERS_INSTALL_DIR={EXTERNAL_DIR}/Vulkan-Headers/{BUILD_DIR_NAME}/install {args.cmake} ..'
    RunShellCmd(cmake_cmd, ICD_BUILD_DIR)

    print("Build Mock ICD")
    build_cmd = f'cmake --build . --config {args.configuration}'
    if not IsWindows(): build_cmd = build_cmd + f' -- -j{os.cpu_count()}'
    RunShellCmd(build_cmd, ICD_BUILD_DIR)
Example #6
def BuildMockICD(args):
    if not os.path.exists(RepoRelative("%s/Vulkan-Tools" % EXTERNAL_DIR_NAME)):
        print("Clone Vulkan-Tools Repository")
        clone_tools_cmd = 'git clone https://github.com/KhronosGroup/Vulkan-Tools.git'
        RunShellCmd(clone_tools_cmd, EXTERNAL_DIR)

    print("Run CMake for ICD")
    ICD_BUILD_DIR = RepoRelative("%s/Vulkan-Tools/%s" % (EXTERNAL_DIR_NAME,BUILD_DIR_NAME))
    utils.make_dirs(ICD_BUILD_DIR)
    cmake_cmd = \
        f'cmake -DCMAKE_BUILD_TYPE={args.configuration.capitalize()} -DBUILD_CUBE=NO -DBUILD_VULKANINFO=NO -DINSTALL_ICD=OFF -DVULKAN_HEADERS_INSTALL_DIR={EXTERNAL_DIR}/Vulkan-Headers/{BUILD_DIR_NAME}/install {args.cmake} ..'
    RunShellCmd(cmake_cmd, ICD_BUILD_DIR)

    VVL_REG_DIR = "%s/Vulkan-Headers/registry" % EXTERNAL_DIR
    VT_SCRIPTS_DIR = "%s/Vulkan-Tools/scripts" % EXTERNAL_DIR

    print ("Geneating ICD Source Code")
    VT_ICD_DIR = "%s/Vulkan-Tools/icd/generated" % EXTERNAL_DIR
    LVL_GEN_SCRIPT = RepoRelative("scripts/lvl_genvk.py")
    typemap_cmd = 'python3 %s -registry %s/vk.xml vk_typemap_helper.h' % (LVL_GEN_SCRIPT, VVL_REG_DIR)
    RunShellCmd(typemap_cmd, VT_ICD_DIR)

    KVT_GEN_SCRIPT = "%s/Vulkan-Tools/scripts/kvt_genvk.py" % EXTERNAL_DIR
    icd_cpp_cmd = 'python3 %s -registry %s/vk.xml mock_icd.cpp' % (KVT_GEN_SCRIPT, VVL_REG_DIR)
    RunShellCmd(icd_cpp_cmd, VT_ICD_DIR)

    icd_h_cmd = 'python3 %s -registry %s/vk.xml mock_icd.h' % (KVT_GEN_SCRIPT, VVL_REG_DIR)
    RunShellCmd(icd_h_cmd, VT_ICD_DIR)

    print("Build Mock ICD")
    build_cmd = f'cmake --build . --config {args.configuration}'
    if not IsWindows(): build_cmd = build_cmd + f' -- -j{os.cpu_count()}'
    RunShellCmd(build_cmd, ICD_BUILD_DIR)
Example #7
 def download_folder(self, bucket_name, folder_prefix, out_dir):
     bucket = boto3.resource('s3').Bucket(bucket_name)
     for file_object in bucket.objects.filter(Prefix=folder_prefix):
         object_name = file_object.key
         print(object_name)
         file_name = os.path.join(out_dir, object_name)
         make_dirs(os.path.dirname(file_name))
         self.download_file(file_name, bucket_name, object_name)
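download_folder above delegates to a download_file method that is not part of the snippet. A minimal sketch, assuming it just wraps boto3's Bucket.download_file:

import boto3

def download_file(self, file_name, bucket_name, object_name):
    # Assumed helper: fetch a single S3 object to a local path.
    bucket = boto3.resource('s3').Bucket(bucket_name)
    bucket.download_file(object_name, file_name)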
Example #8
    def save_summary(self, result: TrainingResult):
        result_dir = self.resultPathProvider.provide()
        make_dirs(result_dir)
        model_id = result.model_id

        result.model.save(os.path.join(result_dir, f"{model_id}.pb"))
        self.save_history(os.path.join(result_dir, f"{model_id}.json"),
                          result.metrics)
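The save_history call is not shown in the snippet; a plausible sketch, assuming the metrics object is a JSON-serializable dict:

import json

def save_history(self, path, metrics):
    # Assumed helper: write the training metrics next to the saved model.
    with open(path, "w") as f:
        json.dump(metrics, f, indent=2)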
Example #9
    def __init__(self, args, dataloader_train, dataloader_dev, char_embedding_config, word_embedding_config,
                 sentence_encoding_config, pair_encoding_config, self_matching_config, pointer_config):

        # for validation
        expected_version = "1.1"
        with open(args.dev_json) as dataset_file:
            dataset_json = json.load(dataset_file)
            if dataset_json['version'] != expected_version:
                print('Evaluation expects v-' + expected_version +
                      ', but got dataset with v-' + dataset_json['version'],
                      file=sys.stderr)
            self.dev_dataset = dataset_json['data']

        self.dataloader_train = dataloader_train
        self.dataloader_dev = dataloader_dev

        self.model = RNet.Model(args, char_embedding_config, word_embedding_config, sentence_encoding_config,
                                pair_encoding_config, self_matching_config, pointer_config)
        self.parameters_trainable = list(
            filter(lambda p: p.requires_grad, self.model.parameters()))
        self.optimizer = optim.Adadelta(self.parameters_trainable, rho=0.95)
        self.best_f1 = 0
        self.step = 0
        self.start_epoch = args.start_epoch
        self.name = args.name
        self.start_time = datetime.datetime.now().strftime('%b-%d_%H-%M')

        if args.resume:
            if os.path.isfile(args.resume):
                print("=> loading checkpoint '{}'".format(args.resume))
                checkpoint = torch.load(args.resume)
                self.start_epoch = checkpoint['epoch']
                self.best_f1 = checkpoint['best_f1']
                self.name = checkpoint['name']
                self.step = checkpoint['step']
                self.model.load_state_dict(checkpoint['state_dict'])
                self.optimizer.load_state_dict(checkpoint['optimizer'])
                self.start_time = checkpoint['start_time']

                print("=> loaded checkpoint '{}' (epoch {})"
                      .format(args.resume, checkpoint['epoch']))
            else:
                raise ValueError("=> no checkpoint found at '{}'".format(args.resume))
        else:
            self.name += "_" + self.start_time

        # use which device
        if torch.cuda.is_available():
            self.model = self.model.cuda(args.device_id)
        else:
            self.model = self.model.cpu()

        self.loss_fn = torch.nn.CrossEntropyLoss()

        configure("log/%s" % (self.name), flush_secs=5)
        self.checkpoint_path = os.path.join(args.checkpoint_path, self.name)
        make_dirs(self.checkpoint_path)
Example #10
def rsync_file(file_instance, to_storage, tantalus_api):
    """ Rsync a single file from one storage to another
    """

    # Get the file resource associated with the file instance
    file_resource = tantalus_api.get("file_resource",
                                     id=file_instance["file_resource"])

    local_filepath = get_new_filepath(to_storage, file_resource)

    remote_filepath = file_instance["filepath"]

    if file_instance["file_resource"]["is_folder"]:
        local_filepath = local_filepath + "/"
        remote_filepath = remote_filepath + "/"

    if os.path.isfile(local_filepath):
        if check_file_same_local(file_instance["file_resource"],
                                 local_filepath):
            return
        error_message = "target file {filepath} already exists on {storage} with different size".format(
            filepath=local_filepath, storage=to_storage["name"])
        raise FileAlreadyExists(error_message)

    if file_instance["storage"]["server_ip"] == to_storage["server_ip"]:
        remote_location = remote_filepath
    else:
        remote_location = (file_instance["storage"]["username"] + "@" +
                           file_instance["storage"]["server_ip"] + ":" +
                           remote_filepath)

    make_dirs(os.path.dirname(local_filepath))

    subprocess_cmd = [
        "rsync",
        "--progress",
        # '--info=progress2',
        "--chmod=D555",
        "--chmod=F444",
        "--times",
        "--copy-links",
        remote_location,
        local_filepath,
    ]

    if file_instance["file_resource"]["is_folder"]:
        subprocess_cmd.insert(1, "-r")

    sys.stdout.flush()
    sys.stderr.flush()
    subprocess.check_call(subprocess_cmd, stdout=sys.stdout, stderr=sys.stderr)

    if not check_file_same_local(file_instance["file_resource"],
                                 local_filepath):
        error_message = "transfer to {filepath} on {storage} failed".format(
            filepath=local_filepath, storage=to_storage["name"])
        raise Exception(error_message)
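Both rsync examples rely on check_file_same_local to decide whether the target already matches the source. A hedged sketch, assuming the file resource records the expected size and the check compares sizes only (the real helper may also compare checksums):

import os

def check_file_same_local(file_resource, filepath):
    # Assumed helper: treat the local copy as "the same" if its size matches.
    return os.path.getsize(filepath) == file_resource["size"]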
Example #11
def main(_):
    utils.make_dirs(FLAGS.outdir)

    logging.basicConfig(format='%(asctime)s [%(levelname)s] %(message)s',
                        filename='{}/eval.log'.format(FLAGS.outdir),
                        filemode='w',
                        level=logging.INFO)

    evaluate(FLAGS.outdir)
Example #12
 def get_chunk_paths_and_hashes(self, filepath):
     """
     Compute the hashes of chunks of a file.
     :param filepath: path of file
     :return: dictionary mapping each chunk path to the hash string of that chunk.
     """
     shutil.rmtree(self._tmp_dir)
     make_dirs(self._tmp_dir)
     return split_file_and_get_hash(filepath, self._tmp_dir)
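split_file_and_get_hash is external to the snippet. A minimal sketch, assuming fixed-size chunks written into tmp_dir and keyed by their SHA-256 hash:

import hashlib
import os

def split_file_and_get_hash(filepath, tmp_dir, chunk_size=4 * 1024 * 1024):
    # Assumed helper: split the file into chunks and map each chunk path
    # to the hex digest of that chunk.
    paths_and_hashes = {}
    with open(filepath, "rb") as f:
        for index, chunk in enumerate(iter(lambda: f.read(chunk_size), b"")):
            chunk_path = os.path.join(tmp_dir, "{}.chunk".format(index))
            with open(chunk_path, "wb") as out:
                out.write(chunk)
            paths_and_hashes[chunk_path] = hashlib.sha256(chunk).hexdigest()
    return paths_and_hashes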
Example #13
def convert_to_tfrecord(x, outdir, fname):
    utils.make_dirs(outdir)

    fn = os.path.join(outdir, fname + '.tfrecord')
    writer = tf.python_io.TFRecordWriter(fn)
    print('Writing', fn)
    if isinstance(x[0], (list, tuple)):
        _convert_pair_to_tfrecord(x, writer)
    else:
        _convert_nopair_to_tfrecord(x, writer)
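_convert_pair_to_tfrecord and _convert_nopair_to_tfrecord are defined elsewhere. A hedged sketch of the no-pair case, assuming each element of x is a string and using the same TF 1.x record API as the snippet:

import tensorflow as tf

def _convert_nopair_to_tfrecord(x, writer):
    # Assumed: serialize each item as a single bytes feature named 'text'.
    for item in x:
        example = tf.train.Example(features=tf.train.Features(feature={
            'text': tf.train.Feature(
                bytes_list=tf.train.BytesList(value=[item.encode('utf-8')])),
        }))
        writer.write(example.SerializeToString())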
Example #14
    def __init__(self):
        self.train_frames = args.train_frames
        print("===> Building model")
        # Model Setting
        # pretrained model setting
        self.extractor = resnet101(num_classes=400,
                                   shortcut_type='B',
                                   cardinality=32,
                                   sample_size=args.crop_size,
                                   sample_duration=args.train_frames)

        # load pretrained model
        # the pretrained extractor must always be put in eval mode!
        weight = get_pretrain_weight(pretrain_path, self.extractor)
        self.extractor.load_state_dict(weight)
        self.extractor.eval()

        self.G = BaseNet(16384 * 4 * 4 * 4, 10)

        # optimizer
        self.optimizerG = optim.Adam(self.G.parameters(),
                                     lr=args.lr,
                                     betas=(0.9, 0.999),
                                     eps=1e-8)
        # loss
        self.BCE_loss = nn.BCELoss()
        self.L1_loss = nn.L1Loss()
        self.L2_loss = nn.MSELoss()
        # data
        self.train_data = train_data_loader
        # self.test_data = test_data_loader
        # training
        self.device = device
        self.epochs = args.epochs
        self.avg_G_loss_arr = []
        self.checkpoint = args.checkpoint

        # cuda
        if torch.cuda.is_available():
            self.extractor.cuda()
            self.G.cuda()

        # model save
        self.save_mname = args.save_model_name
        # checkpoint root
        make_dirs(log_path)
        self.log_dir = log_path + f'/{self.save_mname}'
        # self.load_check_dir =
        self.save_check_dir = make_dirs(self.log_dir) + '/' + 'checkpoint.pkl'

        # CSV logging system
        self.CSVlogger = LogCSV(log_dir=self.log_dir +
                                f"/{self.save_mname}_log.csv",
                                header=['epoch', 'avg_G_Loss', 'accuracy'])
Example #15
def main(
    storage_name,
    temp_output_dir,
    flowcell_id,
    bcl_dir,
    tag_name=None,
    update=False,
    no_bcl2fastq=False,
    threshold=20,
):

    storage = tantalus_api.get("storage", name=storage_name)
    storage_client = tantalus_api.get_storage_client(storage_name)

    make_dirs(temp_output_dir)

    datasets = list(
        tantalus_api.list(
            "sequence_dataset",
            sequence_lanes__flowcell_id=flowcell_id,
            dataset_type="FQ",
        ))

    if len(datasets) > 0:
        logging.warning("found dataset {}".format(','.join(
            [str(d["id"]) for d in datasets])))

    if not no_bcl2fastq:
        run_bcl2fastq(
            flowcell_id,
            bcl_dir,
            temp_output_dir,
        )

    # Import fastqs
    load_brc_fastqs(
        flowcell_id,
        temp_output_dir,
        storage_name,
        storage,
        tantalus_api,
        storage_client,
        tag_name=tag_name,
        update=update,
        threshold=threshold,
    )

    # add 4 lanes generated by bcl2fastq on colossus in order to be picked up for analysis
    add_lanes(flowcell_id)
Example #16
    def upload_new_version(self, new_file_object_paths):
        """
        Upload a new version of the backup if the backup folder was modified.
        :param new_file_object_paths: list of paths of new file objects
        :return: string, the path of the new metadata directory
        """
        for file_object_path in new_file_object_paths:
            object_name = os.path.relpath(file_object_path, self._PREFIX_PATH)
            object_name = replace_backslashes_with_forward_slashes(object_name)
            self._user.upload_file(file_object_path, self._bucket, object_name)

        new_metadata_dir = os.path.join(self._metadata_dir,
                                        "v{}".format(self._version))
        make_dirs(new_metadata_dir)
        self.upload_new_metadata(new_metadata_dir)
        return new_metadata_dir
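replace_backslashes_with_forward_slashes normalizes Windows-style relative paths into S3 object keys; a one-line sketch of the assumed helper:

def replace_backslashes_with_forward_slashes(path):
    # Assumed helper: S3 keys always use forward slashes.
    return path.replace("\\", "/")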
Example #17
    def train(self):
        # network on training mode
        self.G.train()

        for epoch in range(self.epochs):
            epoch_G_loss = 0.0
            for iter, batch in enumerate(self.train_data, 1):
                # move the data from CPU to GPU with .to(self.device)
                # tensor shape : [batch_size, vid_num, frame_num, H, W, C]
                input, output = batch[0].to(self.device), batch[1].to(
                    self.device)
                # check the input tensor imgs
                tensor_imgsave(join(args.root_path, 'check_tensor'), input,
                               self.train_frames)

                # the input holds 8 videos; split them apart one by one
                # and feed each one through the pretrained model
                # permute from [N, V, F, H, W, C] to [N, V, C, H, W, F]
                features = self.extractor(input[:, 0, :, :, :, :].permute(
                    0, 4, 1, 2, 3))
                for i in range(1, args.num_of_vid):
                    features = torch.cat(
                        (features,
                         self.extractor(input[:, i, :, :, :, :].permute(
                             0, 4, 1, 2, 3))),
                        dim=1)
                    print(features.shape)

                self.optimizerG.zero_grad()
                fake_out = self.G(input)
                G_loss = self.BCE_loss(fake_out, output)
                G_loss.backward()
                self.optimizerG.step()

                epoch_G_loss += G_loss.data
                print("===> Epoch[{}]({}/{}): G_Loss: {:.4f} ".format(
                    epoch + 1, iter, len(self.train_data), G_loss.item()))

            avg_G_loss = epoch_G_loss / len(self.train_data)
            self.avg_G_loss_arr.append(avg_G_loss.item())

            # learning rate decay
            if (epoch + 1) % (self.epochs / 2) == 0:
                for param_group in self.optimizerG.param_groups:
                    param_group['lr'] /= 2.0
                print('G: Learning rate decay: lr={}'.format(
                    self.optimizerG.param_groups[0]['lr']))

            # test PSNR and check the training state
            if (epoch + 1) % self.checkpoint == 0:
                # save training img
                train_log_dir = make_dirs(self.log_dir + '/training_img')
                # tensor_imgsave(train_log_dir, fake_out, target, epoch=epoch)
                # testing set5
                # make loss graph
                # make_graph(np.array(range(epoch + 1)), np.array(self.avg_G_loss_arr), self.model)

                # save check point
                check_name = self.save_check_dir + '_epoch_' + str(epoch + 1)
                torch.save(self.model, check_name)
Example #18
def imgsave(log_dir, img, frame_num, denorm):
    """
    Save a batch of numpy-array frames to disk as tiled images.
    :param log_dir: log directory for image saving
    :param img: images to save, shaped [V, D, H, W, C]
    :param frame_num: number of frames to save
    :param denorm: denormalization function applied to the input images
    """
    now = time.gmtime(time.time())
    t = '{0}_{1}'.format(now.tm_min, now.tm_sec)
    make_dirs(log_dir)

    # [V, D, H, W, C]
    for i in range(frame_num):
        img0 = img[0, i, :, :, :]
        img0 = denorm(img0)
        img1 = img[1, i, :, :, :]
        img1 = denorm(img1)

        img2 = img[2, i, :, :, :]
        img2 = denorm(img2)

        img3 = img[3, i, :, :, :]
        img3 = denorm(img3)

        img_tmp1 = np.concatenate((img0, img1, img2, img3), axis=0)
        img4 = img[4, i, :, :, :]
        img4 = denorm(img4)

        img5 = img[5, i, :, :, :]
        img5 = denorm(img5)

        img6 = img[6, i, :, :, :]
        img6 = denorm(img6)

        img7 = img[7, i, :, :, :]
        img7 = denorm(img7)

        img_tmp2 = np.concatenate((img4, img5, img6, img7), axis=0)

        # the image must be cast to uint8 before saving
        img_out = np.concatenate((img_tmp1, img_tmp2), axis=1).astype('uint8')

        plt.imsave(log_dir + '/time_' + t + '_frame_' + str(i),
                   img_out,
                   format='png')
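imgsave takes denorm as a parameter rather than fixing a normalization scheme. A hypothetical denormalizer for inputs scaled to [0, 1], shown only as an example of what could be passed in (the snippet itself does not specify one):

import numpy as np

def denorm_01(img):
    # Assumed: undo a simple [0, 1] normalization back to the 8-bit pixel range.
    return np.clip(img * 255.0, 0, 255)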
Example #19
def main(_):
    current_time = datetime.now().strftime("%Y%m%d-%H%M")
    outdir = os.path.join(
        FLAGS.outdir, FLAGS.arch + '-' + current_time + '-' + str(os.getpid()))
    trained_model = os.path.join(outdir, 'trained_model')
    summary_dir = os.path.join(outdir, 'summary')

    utils.make_dirs(trained_model)
    utils.print_arguments(args=FLAGS.__flags,
                          log_fn=os.path.join(outdir, 'args.log'))

    logging.basicConfig(format='%(asctime)s [%(levelname)s] %(message)s',
                        filename='{}/train.log'.format(outdir),
                        filemode='w',
                        level=logging.INFO)

    train(trained_model, summary_dir)
Example #20
 def backup_dir_prompt(self):
     try:
         prompt = "Enter a directory to save your backup: "
         # NOTE: for test
         backup_dir = "../test"
         #backup_dir = str(input(prompt).strip())
         backup_dir = os.path.abspath(backup_dir)
         try:
             print("your backup_dir: ", backup_dir)
             make_dirs(backup_dir)
         except:
             print("Invalid directory!")
             return
         return backup_dir
     except:
         print("Timeout!")
         return
Example #21
def main():
    url_info = get_cmake_url()
    cmake_exe_path = get_cmake_exe_path(url_info)

    if not os.path.exists(cmake_exe_path):
        url = urlparse(url_info['url'])
        print(f'cmake minimum version does not exist locally; downloading from {url_info["url"]}')
        cmake_archive = os.path.basename(url.path)
        with utils.URLRequest(url) as res:
            with open(cmake_archive, 'wb') as fd: fd.write(res.read())
        utils.expand_archive(cmake_archive)

    cmake_build_dir = 'build-cmake-test'
    utils.make_dirs(cmake_build_dir, clean=True)
    currDir = os.getcwd()
    cmake_args = get_cmake_args(cmake_exe_path)
    subprocess.check_call(cmake_args, cwd=cmake_build_dir)
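This example passes clean=True, so the utils.make_dirs used by the Vulkan scripts evidently accepts a flag that empties the directory first. A hedged sketch of that variant, extending the plain helper sketched near Example #1:

import os
import shutil

def make_dirs(path, clean=False):
    # Assumed: optionally remove any existing contents before recreating the directory.
    if clean and os.path.isdir(path):
        shutil.rmtree(path)
    os.makedirs(path, exist_ok=True)
    return path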
Example #22
    def __init__(self, stat_cache_dir, backup_folder):
        self._stat_cache_dir = stat_cache_dir  # str, abs path
        self._backup_folder = backup_folder  # str, abs path

        # latest file backup
        self._latest_stat_cache_dir = os.path.join(stat_cache_dir, "latest")
        make_dirs(self._latest_stat_cache_dir)

        # newest file backup; compared to latest
        self._new_stat_cache_dir = os.path.join(stat_cache_dir, "new")
        make_dirs(self._new_stat_cache_dir)

        # root stat file
        self._root_stat = os.path.join(
            self._latest_stat_cache_dir,
            os.path.basename(self._backup_folder) + ".dir")

        self.initialized = True
Example #23
    def save_pred_label(self, predict_label, iter):
        vid_name = None
        if iter == 1:
            vid_name = '오마이걸-불꽃놀이'

        og_vid_dir = join(self.original_vid_dir, vid_name)
        demo_dir = join(self.demo_dir, vid_name)
        label_dir = join(demo_dir, vid_name + '.csv')
        make_dirs(demo_dir)

        # save csv file
        with open(label_dir, "a") as output:
            writer = csv.writer(output, lineterminator='\n')
            for i in range(predict_label.shape[0]):
                writer.writerow(predict_label[i])
        # generate demo video
        make_stage_mix(og_vid_dir, demo_dir, predict_label[:, 1], self.in_fps,
                       self.out_fps, self.width, self.height)
Example #24
    def download_from_blob(self, file_instance, to_storage, tantalus_api):
        """ Transfer a file from blob to a server.

        This should be called on the from server.
        """

        cloud_filepath = file_instance["filepath"]
        cloud_container, cloud_blobname = cloud_filepath.split("/", 1)
        assert cloud_container == file_instance["storage"]["storage_container"]

        # Get the file resource associated with the file instance
        file_resource = tantalus_api.get("file_resource",
                                         id=file_instance["file_resource"])

        local_filepath = get_new_filepath(to_storage, file_resource)

        make_dirs(os.path.dirname(local_filepath))

        if not self.block_blob_service.exists(cloud_container, cloud_blobname):
            error_message = "source file {filepath} does not exist on {storage} for file instance with pk: {pk}".format(
                filepath=cloud_filepath,
                storage=file_instance["storage"]["name"],
                pk=file_instance["id"],
            )
            raise FileDoesNotExist(error_message)

        if os.path.isfile(local_filepath):
            if self._check_file_same_blob(file_resource, cloud_container,
                                          cloud_blobname):
                return

            error_message = "target file {filepath} already exists on {storage}".format(
                filepath=local_filepath, storage=to_storage["name"])
            raise FileAlreadyExists(error_message)

        self.block_blob_service.get_blob_to_path(
            cloud_container,
            cloud_blobname,
            local_filepath,
            progress_callback=TransferProgress().print_progress,
            max_connections=1,
        )

        os.chmod(local_filepath, 0o444)
Example #25
    def checkpoint_set(self):
        """
        Set up the checkpoint-related attributes.
        :param self.save_mname: model name used when saving each checkpoint
        :param self.log_dir: directory where checkpoints are saved
        :param self.save_check_dir: path of the checkpoint file
        :param self.CSVlogger: CSV logger used to track the loss
        """
        # model save
        self.save_mname = args.save_model_name
        # checkpoint root
        make_dirs(log_path)
        self.log_dir = log_path + f'/{self.save_mname}'
        self.save_check_dir = make_dirs(self.log_dir)
        self.graph_dir = self.log_dir

        # CSV logging system
        self.CSVlogger = LogCSV(log_dir=self.log_dir + f"/{self.save_mname}_log.csv",
                                header=['epoch', 'avg_G_Loss', 'accuracy'])
Example #26
    def test(self, epoch, avg_G_loss, avg_D_loss):
        avg_accuracy = 0
        # test for set 5
        with torch.no_grad():
            for i, batch in enumerate(self.test_data, 1):
                input, target = batch[0].to(self.device), batch[1].to(
                    self.device)
                predict = self.G(input)

                accuracy = calculate_accuracy(predict, target)

                avg_accuracy += accuracy

                # save generated and target img
                img_dir = join(self.log_dir, 'epoch_' + f'{epoch}')
                make_dirs(img_dir)
            avg_accuracy = avg_accuracy / len(self.test_data)

        print("===> Avg. Accuracy: {:.4f}".format(avg_accuracy))
        self.CSVlogger([epoch + 1, avg_G_loss, avg_D_loss, avg_accuracy])
Example #27
 def _retrieve_backup_data_from_file_objects_and_metadata(
         self, encrypted_file_objects_dir, metadata_dir, backup_data_dir,
         original_metadata_dir):
     """
      Decrypt the data keys with the control key, then decrypt the file
      objects with those data keys and join them back together
     :param encrypted_file_objects_dir: string, path of directory containing
         encrypted file objects downloaded from S3
     :param metadata_dir: string, path of metadata_dir
     :param backup_data_dir: string, path of folder containing retrieved backup
         data
     :param original_metadata_dir: metadata dir in the first call
         (not recursive)
     :return
     """
     for metadata_path in os.listdir(metadata_dir):
         metadata_path = os.path.join(metadata_dir, metadata_path)
         if os.path.isfile(metadata_path):
             backup_file_path_rel = os.path.relpath(metadata_path,
                                                    original_metadata_dir)
             backup_file_path_rel = backup_file_path_rel[:
                                                         backup_file_path_rel
                                                         .find(".metadata")]
             metadata = Metadata.read(metadata_path)
             data_keys = self.decrypt_data_keys(
                 metadata.encrypted_data_keys)
             backup_file_path = os.path.join(backup_data_dir,
                                             backup_file_path_rel)
             make_dirs(os.path.dirname(backup_file_path))
             with open(backup_file_path, "wb") as f:
                 for file_id, data_key in \
                         zip(metadata.file_ids, data_keys):
                     encrypted_file_object_path = os.path.join(
                         encrypted_file_objects_dir, str(file_id))
                     chunk = decryptFile(data_key,
                                         encrypted_file_object_path)
                     f.write(chunk)
         else:
             self._retrieve_backup_data_from_file_objects_and_metadata(
                 encrypted_file_objects_dir, metadata_path, backup_data_dir,
                 original_metadata_dir)
Example #28
def BuildVVL(args, build_tests=False):

    print("Log CMake version")
    cmake_ver_cmd = 'cmake --version'
    RunShellCmd(cmake_ver_cmd)

    utils.make_dirs(VVL_BUILD_DIR)
    print("Run CMake for Validation Layers")
    cmake_cmd = f'cmake -DUPDATE_DEPS=ON -DCMAKE_BUILD_TYPE={args.configuration.capitalize()} {args.cmake} ..'
    if IsWindows(): cmake_cmd = cmake_cmd + f' -A {args.arch}'
    if build_tests: cmake_cmd = cmake_cmd + ' -DBUILD_TESTS=ON'
    RunShellCmd(cmake_cmd, VVL_BUILD_DIR)

    print("Build Validation Layers and Tests")
    build_cmd = f'cmake --build . --config {args.configuration}'
    if not IsWindows(): build_cmd = build_cmd + f' -- -j{os.cpu_count()}'
    RunShellCmd(build_cmd, VVL_BUILD_DIR)

    print('Run vk_validation_stats.py')
    utils.make_dirs(os.path.join(VVL_BUILD_DIR, 'layers', args.configuration.capitalize()))
    RunShellCmd(f'python3 ../scripts/vk_validation_stats.py ../{EXTERNAL_DIR_NAME}/Vulkan-Headers/registry/validusage.json -text layers/{args.configuration.capitalize()}/vuid_coverage_database.txt', VVL_BUILD_DIR)
Example #29
    def _create_new_metadata_of_modified_file(self, filepath, file_ids,
                                              data_keys):
        """
        Create new metadata of a modified file
        :param filepath: path of modified file
        :param file_ids: list of integers, ids of the file objects
            which are chunks of this file
        :param data_keys: list of strings, keys to encrypt/decrypt file objects
        :return
        """

        encrypted_data_keys = self.encrypt_data_keys(data_keys)

        relative_path_from_backup_root = os.path.relpath(
            filepath, self._backup_folder)
        metadata = Metadata(relative_path_from_backup_root, file_ids,
                            encrypted_data_keys, self._version)
        path = os.path.join(self._metadata_dir, "v{}".format(self._version),
                            relative_path_from_backup_root + ".metadata")
        make_dirs(os.path.dirname(path))
        metadata.save(path)
        return path
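The backup examples (#16, #27, #29, #30) pass around a Metadata object with save and read methods. A minimal sketch consistent with how those examples use it, assuming a JSON on-disk format:

import json

class Metadata:
    def __init__(self, path, file_ids, encrypted_data_keys, version):
        self.path = path
        self.file_ids = file_ids
        self.encrypted_data_keys = encrypted_data_keys
        self.version = version

    def save(self, path):
        # Assumed: serialize all fields as JSON.
        with open(path, "w") as f:
            json.dump(self.__dict__, f)

    @classmethod
    def read(cls, path):
        # Assumed: load a previously saved metadata file.
        with open(path) as f:
            return cls(**json.load(f))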
Example #30
    def _copy_old_metadata_and_get_set_file_ids_if_unmodified(
            self, list_unmodified_files):
        """
        Copy metadata of old version to current version
        :param list_unmodified_files: list of strings, each string is
            a path of an unmodified file
        :return: set of integers, containing file ids of unmodified files
        """
        set_file_ids = set()
        for filepath in list_unmodified_files:
            relative_path_from_backup_root = os.path.relpath(
                filepath, self._backup_folder)
            new_version_path = os.path.join(
                self._metadata_dir, "v{}".format(self._version),
                relative_path_from_backup_root + ".metadata")
            old_version_path = os.path.join(
                self._metadata_dir, "v{}".format(self._version - 1),
                relative_path_from_backup_root + ".metadata")
            make_dirs(os.path.dirname(new_version_path))
            if os.path.isfile(old_version_path):
                # file already exists
                shutil.copy(old_version_path, new_version_path)
                file_ids = Metadata.read(new_version_path).file_ids
                for file_id in file_ids:
                    set_file_ids.add(file_id)
            else:
                # deal with duplicated file: create new metadata for them
                chunk_paths_and_hashes = \
                        self._object_db.get_chunk_paths_and_hashes(filepath)
                file_ids = []
                data_keys = []
                for chunk_path, h in chunk_paths_and_hashes.items():
                    file_id, data_key = self._object_db.query(h)
                    file_ids.append(file_id)
                    data_keys.append(data_key)
                self._create_new_metadata_of_modified_file(
                    filepath, file_ids, data_keys)

        return set_file_ids