Example no. 1
def generate_html(config, output_dir):
    articles, categories, months, pages = index_blog_structure(config)

    file_loader = FileSystemLoader(
        os.path.join('themes', config['theme'], 'templates'))
    env = Environment(loader=file_loader)
    env.globals = get_globals(config, articles, categories, months, pages)

    clear_dir(output_dir)
    generate_home(config, output_dir, env)
    generate_archives_index(config, output_dir, env)

    for category in categories:
        generate_category_collection(config, output_dir, env, category)

    for month in months:
        generate_month_collection(config, output_dir, env, month)

    for article in articles:
        generate_article(config, output_dir, env, article)

    for page in pages:
        generate_page(config, output_dir, env, page)

    copy_assets(config, output_dir)
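
Example no. 1, like every snippet that follows, relies on a clear_dir helper that is never shown, and the call sites suggest slightly different behaviours across projects (some expect it to create the directory and return its path, others create the directory themselves afterwards). A minimal sketch, assuming the common case of emptying the directory, creating it if it is missing, and returning its path:

import os
import shutil


def clear_dir(path):
    # Assumed behaviour, not taken from any of the projects above:
    # empty the directory, create it if missing, and return its path.
    if os.path.isdir(path):
        for name in os.listdir(path):
            entry = os.path.join(path, name)
            if os.path.isdir(entry) and not os.path.islink(entry):
                shutil.rmtree(entry)
            else:
                os.remove(entry)
    else:
        os.makedirs(path)
    return path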
Example no. 2
 def test_convert_mp3(self):
     dir_convert = join(TEST_DIR_RAW, CONVERTED_DIR)
     clear_dir(dir_convert)
     assert not os.listdir(dir_convert), "Dir not empty"
     convert_mp3_to_wav(TEST_DIR_RAW, TEST_LIST_AUDIO[0])
     assert TEST_LIST_AUDIO[0].replace("mp3", "wav") in \
            os.listdir(dir_convert), "MP3 conversion failed"
Example no. 3
def diagnosis_by_fuzzing_entropy(program, fuzzing_dir, entropy_threshold, ratio_min, ratio_max,
                                 fuzzed_files_per_iter=10, stop_iter=500, pre_fuzz_count=0):
    seedfiles_dir = os.path.join(fuzzing_dir, consts.SEEDFILES)
    matrix_file = None
    for granularity in [DLL_GRANULARITY, FUNCTION_GRANULARITY, DOMINATOR_GRANULARITY, XREF_GRANULARITY]:
        instances_dir = utils.clear_dir(os.path.join(fuzzing_dir, consts.INSTANCES, granularity, str(entropy_threshold)))
        current_entropy = float('inf')
        previous_entropy = float('-inf')
        tracing_data = generate_tracing_data(granularity, matrix_file)
        matrix_file = os.path.join(fuzzing_dir, consts.FUZZING_MATRIX.format("{0}_{1}".format(granularity, str(entropy_threshold))))
        if os.path.exists(matrix_file):
            os.remove(matrix_file)
        working_dir = utils.clear_dir(os.path.join(fuzzing_dir, consts.WORKING_DIR, granularity, str(entropy_threshold)))
        diagnosis_result = os.path.join(fuzzing_dir, consts.DLL_DIAGNOSIS_RESULT if granularity == DLL_GRANULARITY else consts.FUNCTION_DIAGNOSIS_RESULT)
        for seed_example in utils.get_files_in_dir(seedfiles_dir):
            shutil.copy2(seed_example, instances_dir)
            instance_path = os.path.join(instances_dir, os.path.basename(seed_example))
            run_debugger_on_files(program, [instance_path], working_dir, config, granularity, tracing_data)
        fuzzed_files = fuzz_project_dir(seedfiles_dir, instances_dir, pre_fuzz_count, ratio_min, ratio_max)
        run_debugger_on_files(program, fuzzed_files, working_dir, config, granularity, tracing_data)
        iter_ind = 0
        while abs(current_entropy - previous_entropy) > entropy_threshold:
            fuzzed_files = fuzz_project_dir(seedfiles_dir, instances_dir, fuzzed_files_per_iter, ratio_min, ratio_max)
            run_debugger_on_files(program, fuzzed_files, working_dir, config, granularity, tracing_data)
            diagnoser.campaign_matrix.create_matrix_for_dir(working_dir, diagnosis_result, matrix_file)
            sfl_matrix = readPlanningFile(matrix_file)
            sfl_matrix.diagnose()
            results = Diagnosis_Results(sfl_matrix.diagnoses, sfl_matrix.initial_tests, sfl_matrix.error)
            previous_entropy = current_entropy
            current_entropy = results.component_entropy
            iter_ind = iter_ind + 1
            if iter_ind > stop_iter:
                break
Example no. 4
def generate():
    posts = utils.get_posts()
    ppp = config['posts_per_page']
    pages = int(math.ceil(float(len(posts)) / ppp))

    utils.clear_dir('site/page')
    for i in range(pages):
        page_content = render_template('frontend/index.html',
                                       config=config,
                                       frontend=True,
                                       current=i + 1,
                                       first=(i == 0),
                                       last=(i == pages - 1),
                                       posts=posts[i * ppp:(i + 1) * ppp])
        file('site/page/%s.html' % (i + 1), 'w').write(
                                    page_content.encode(config['encoding']))
        if i == 0:
            file('site/index.html', 'w').write(
                                    page_content.encode(config['encoding']))

    not_found_content = render_template('404.html',
                                        config=config,
                                        frontend=True)
    file('site/404.html', 'w').write(
                                not_found_content.encode(config['encoding']))

    utils.clear_dir('site/posts')
    infos = utils.get_post_infos()

    feed = AtomFeed(config['title'],
                    feed_url=config['url_root'] + '/posts.atom',
                    url=config['url_root'])
    for info in infos:
        with open('posts/%s' % info['filename'], 'r') as f:
            content = f.read().decode(config['encoding'])
            title = utils.get_title(content)
            content = utils.postprocess_post_content(info['slug'],
                                                            content, False)
            html_content = render_template('frontend/post.html',
                                           config=config,
                                           frontend=True,
                                           title=title,
                                           content=content)
            file('site/posts/%s.html' % info['slug'], 'w').write(
                                    html_content.encode(config['encoding']))

            feed_content = render_template('feed.html',
                                           config=config,
                                           content=content)
            feed.add(title, feed_content, content_type='html',
                     url=make_external('/posts/' + info['slug']),
                     author='Tony Wang',
                     published=utils.date_localize_from_utc(info['time'],
                                                            True),
                     updated=utils.date_localize_from_utc(info['time'], True))

    file('site/posts.atom', 'w').write(
        str(feed.get_response().iter_encoded(config['encoding']).next()))

    return 'Done!'
Example no. 5
def pred_pano_labels(pano_id,
                     path_to_gsv_scrapes,
                     GSV_IMAGE_WIDTH,
                     GSV_IMAGE_HEIGHT,
                     model_dir,
                     num_threads=4,
                     save_labeled_pano=True,
                     verbose=False):
    ''' takes a panorama id and returns a dict of the filtered predictions'''
    path_to_folder = os.path.join(path_to_gsv_scrapes, pano_id[:2], pano_id)
    path_to_xml = path_to_folder + ".xml"
    (GSV_IMAGE_WIDTH,
     GSV_IMAGE_HEIGHT) = utils.extract_width_and_height(path_to_xml)
    now = time.time()
    temp = os.path.join('temp', 'crops')
    if not os.path.exists(temp):
        os.makedirs(temp)
    if not os.path.exists('viz'):
        os.makedirs('viz')
    utils.clear_dir(temp)
    make_sliding_window_crops(pano_id,
                              GSV_IMAGE_WIDTH,
                              GSV_IMAGE_HEIGHT,
                              path_to_gsv_scrapes,
                              num_threads=num_threads,
                              verbose=verbose)

    model_name = utils.get_model_name()
    model_path = os.path.join(model_dir, model_name + '.pt')

    preds = predict_from_crops("temp", model_path, verbose=verbose)
    preds_loc = write_predictions_for_every_pano(path_to_gsv_scrapes,
                                                 preds,
                                                 verbose=verbose)
    if (len(preds_loc) == 0):
        return None
    pred = read_predictions_from_file(preds_loc)
    pred_dict = non_max_sup(pred,
                            radius=150,
                            clip_val=4.5,
                            ignore_ind=1,
                            verbose=verbose)

    if save_labeled_pano:
        pano_root = os.path.join(path_to_gsv_scrapes, pano_id[:2], pano_id)
        out_img = os.path.join("viz", pano_id + "_viz.jpg")
        show_predictions_on_image(pano_root,
                                  GSV_IMAGE_WIDTH,
                                  GSV_IMAGE_HEIGHT,
                                  pred_dict,
                                  out_img,
                                  show_coords=False,
                                  show_box=True,
                                  verbose=verbose)

    utils.clear_dir(temp)
    if verbose:
        print("{} took {} seconds".format(pano_id, time.time() - now))
    return pred_dict
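
Per its docstring, pred_pano_labels takes a panorama id and returns a dict of the filtered predictions (or None when no predictions are written). A purely hypothetical call with placeholder paths and id; note that the width and height arguments are immediately overwritten inside the function from the panorama's XML file:

# Hypothetical invocation; the pano id, directories and dimensions are
# placeholders, not values taken from the project.
pred_dict = pred_pano_labels("xxEXAMPLEPANOID000000",
                             path_to_gsv_scrapes="scrapes",
                             GSV_IMAGE_WIDTH=13312,
                             GSV_IMAGE_HEIGHT=6656,
                             model_dir="models",
                             num_threads=4,
                             save_labeled_pano=True,
                             verbose=True)
if pred_dict is None:
    print("no predictions were produced for this panorama")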
Example no. 6
def make_directories(out_path, split, df):
    tmp_path = os.path.join(out_path, 'tmp')
    utils.make_dir(tmp_path)
    utils.clear_dir(tmp_path)
    split_path = os.path.join(out_path, split)
    utils.make_dir(split_path)
    if 'label' in df.columns:
        label_cat = df.label.astype('category')
        for label in label_cat.cat.categories:
            label_path = os.path.join(split_path, label)
            utils.make_dir(label_path)
    return tmp_path, split_path
Example no. 7
    def set_workdir(self, workdir):
        self.workdir = workdir
        self.log_dir = os.path.join(self.workdir, "LOG_DIR")

        utils.clear_dir(self.log_dir)
        if not os.path.isdir(self.log_dir):
            os.mkdir(self.log_dir)

        try:
            self.log_file = open(os.path.join(self.log_dir, "log.txt"), "w")
        except Exception as e:
            print(e, file=sys.stderr)
            exit(-1)
Example no. 8
 def clear_save_name(self):
     """
     Clears all saved content for SAVE_NAME.
     """
     utils.clear_dir(self.model_save_dir)
     utils.clear_dir(self.summary_save_dir)
     utils.clear_dir(self.log_save_dir)
     utils.clear_dir(self.image_save_dir)
     print('Clear stuff in {}'.format(os.path.join(self.save_dir)))
Example no. 9
def template_test():
    try:
        for f_name in session['f_names']:
            app.logger.info(
                f'******* clear out {app.root_path}/testoutput{f_name} from  = {session}'
            )
            clear_dir(out_path=app.root_path, f_name=f_name)
    except:
        pass
    session['my_encounters'] = []
    session['f_names'] = []
    session['resource_list'] = []

    app.logger.info(f'******* sessions = {session}')
    #cache.get('f_name') #clear upload files if present in cache.
    #cache.clear()  # clear all the cache *TODO  switch over to db*

    my_string = '''This is a simple Flask App FHIR Facade which:

For single "real-time" Notifications:

  1. Fetches *Admit* and *Discharge* Encounters from the {ref_server} Reference Server
  1. Builds the Da Vinci Notifications Message Bundle
  1. Submits the Message to the nominated endpoint using the `$process-message` operation
  1. Receives and displays the $process-message operation response from the server

For a Batch Transaction of multiple Notifications:

  1. Fetches all the relevant *Admit* and *Discharge* Encounters from the {ref_server} Reference Server
  1. Builds a transaction Bundle with:
     1. the Da Vinci Notifications Message Bundle as entries
     1. `POST` for the request method
     1. `/$process-message` for the request url
  1. Submits the transaction Bundle to the nominated endpoint using the `POST` operation
  1. Receives and displays the "transaction-response" response from the server.
'''.format(ref_server=ref_server_name)
    return render_template(
        'template.html',
        ref_server=ref_server_name,
        enc_list=enc_list,
        title="Index",
    )
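
The step list embedded in my_string describes wrapping each Da Vinci Notification message Bundle as an entry of a FHIR transaction Bundle whose request is a POST to $process-message. A minimal sketch of that wrapping, using plain dicts and the requests library rather than this app's actual builder code:

import requests  # assumption: any HTTP client would do


def wrap_as_transaction(message_bundles):
    # One transaction entry per Da Vinci Notification message Bundle,
    # each to be posted to the $process-message operation.
    return {
        "resourceType": "Bundle",
        "type": "transaction",
        "entry": [
            {
                "resource": bundle,
                "request": {"method": "POST", "url": "/$process-message"},
            }
            for bundle in message_bundles
        ],
    }


def submit_batch(endpoint, message_bundles):
    # The server is expected to reply with a "transaction-response" Bundle.
    return requests.post(endpoint,
                         json=wrap_as_transaction(message_bundles),
                         headers={"Content-Type": "application/fhir+json"})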
Example no. 10
def download(save_dir, rewrite=False):
    save_dir = utils.get_dir(save_dir)
    if rewrite:
        utils.clear_dir(save_dir)

    filename = DATA_URL.split('/')[-1]
    filepath = os.path.join(save_dir, filename)
    if not os.path.isfile(filepath):

        def _progress(count, block_size, total_size):
            sys.stdout.write('\r>> Downloading %s %.1f%%' %
                             (filename, float(count * block_size) /
                              float(total_size) * 100.0))
            sys.stdout.flush()

        filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
        print()
        statinfo = os.stat(filepath)
        print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')

    tarfile.open(filepath, 'r:gz').extractall(save_dir)
Example no. 11
def batch_save_pano_labels(pano_ids,
                           path_to_gsv_scrapes,
                           model_dir,
                           num_threads=4,
                           verbose=False):
    ''' takes a list of panorama ids and writes the predictions for each one to labels.csv '''
    start = time.time()
    crops = os.path.join('temp', 'crops')
    if not os.path.exists(crops):
        os.makedirs(crops)
    if not os.path.exists('viz'):
        os.makedirs('viz')
    utils.clear_dir(crops)

    for pano_id in pano_ids:
        now = time.time()

        pano_root = os.path.join(path_to_gsv_scrapes, pano_id[:2], pano_id)
        path_to_xml = pano_root + ".xml"
        (GSV_IMAGE_WIDTH,
         GSV_IMAGE_HEIGHT) = utils.extract_width_and_height(path_to_xml)
        make_sliding_window_crops(pano_id,
                                  GSV_IMAGE_WIDTH,
                                  GSV_IMAGE_HEIGHT,
                                  path_to_gsv_scrapes,
                                  num_threads=num_threads,
                                  verbose=verbose)

        model_name = "20ep_sw_re18_2ff2"
        model_path = os.path.join(model_dir, model_name + '.pt')

        preds = predict_from_crops("temp", model_path, verbose=verbose)
        preds_loc = write_predictions_for_every_pano(preds,
                                                     path_to_gsv_scrapes +
                                                     pano_id[:2],
                                                     "labels.csv",
                                                     verbose=verbose)
        utils.clear_dir(crops)
        print("{} took {} seconds".format(pano_id, time.time() - now))
    print("total time: {} seconds".format(time.time() - start))
Example no. 12
def clear_save_name():
    """
    Clears all saved content for SAVE_NAME.
    """
    utils.clear_dir(MODEL_SAVE_DIR)
    utils.clear_dir(SUMMARY_SAVE_DIR)
    utils.clear_dir(IMG_SAVE_DIR)
    print('Clear stuff in {}'.format(os.path.join(SAVE_DIR, SAVE_NAME)))
Example no. 13
def generate_data(input_data, date_after, path_to_panos, ignore_null,
                  number_agree, path_to_summary, verbose, num_threads):
    crops = os.path.join("single", "crops")
    if not os.path.exists(crops):
        os.makedirs(crops)
    utils.clear_dir(crops)
    existing_labels = labels_already_made(path_to_panos)
    add_to_summary = {}
    dict_valid = read_validation_data(input_data, date_after, existing_labels,
                                      add_to_summary, number_agree, verbose)
    dict_image = generate_image_date(dict_valid, existing_labels, verbose)
    make_crop_threading(dict_image, path_to_panos, verbose, num_threads)
    get_results(verbose)
    rows_dict = exact_labels(ignore_null)
    labels_list = generate_labelrows(dict_valid)
    new_labels = write_summary_file(rows_dict, labels_list, add_to_summary,
                                    path_to_summary)
    if (verbose):
        print("Number of new labels is " + str(len(new_labels)))
    update_labels_already_made(new_labels, path_to_panos)
    utils.clear_dir(crops)
    if os.path.exists(path_to_completelabels):
        os.remove(path_to_completelabels)
Example no. 14
    def __init__(self):
        git(['checkout', 'master'])
        git(['commit', '--allow-empty', '-m', 'octoblog-publish notification'])
        git(['checkout', 'dev'])

        print('---> generating blog to www/ directory')
        clear_dir('www')
        config = json.load(open('config.json', 'r'))
        generate_html(config, 'www')
        print('---> done')

        SaveCommand()

        print('---> pushing contents of www/ to master')
        git(['checkout', 'master'])
        clear_dir(os.getcwd())
        git(['checkout', 'dev', '--', 'www'])
        flatten('www')
        git(['add', '.'])
        git(['commit', '-m', '"octoblog-publish files"'])
        git(['push', 'origin', 'master'])
        git(['checkout', 'dev'])
        print('---> done')
Example no. 15
def ReduceSingleThread(content_file):
    print u'Building inverted index...'
    worker = ReduceWorker()
    reader = IOWorker([content_file], 20000)
    doc_list = []
    count = 0
    save_count = 0
    start_time = time.clock()
    clear_dir('tmp')
    while 1:
        task_raw_data = reader.GetLines()
        data_count = len(task_raw_data)
        if data_count == 0:
            break
        doc_list = Converter.ToDocList(task_raw_data, count)
        count += data_count
        term_inverter_list = worker.run(doc_list)
        term_inverter_string = Converter.TermInverterToString(
            term_inverter_list)
        save_count += 1
        IOWorker.SaveText('tmp\\%d' % save_count, term_inverter_string)
        print_speed(start_time, count)
    print u'Finished saving; %d temporary files in total' % save_count
Example no. 16
def main(argv):
    FLAGS = gflags.FLAGS
    gflags.DEFINE_string('srcfile', 'json/sports1m_test.json',
                         'json file to read from [json/sports1m_test.json]')
    gflags.DEFINE_string('savedir', 'tmp',
                         'destination directory to save files [tmp]')
    gflags.DEFINE_string('video_format', 'mp4', 'video format[mp4]')
    gflags.DEFINE_string('video_resolution', '360p',
                         'video resolution 360p, 720p and so on[360p]')
    gflags.DEFINE_integer('min_len', 4, 'Minimal length (4 seconds)')
    gflags.DEFINE_integer('max_len', 30, 'Maximal length (30 seconds)')
    gflags.DEFINE_string(
        'dstfile', None,
        'destination json file [json/sports1m_test_(duration).json]')
    gflags.DEFINE_boolean('rewrite', True,
                          'rewrite everything in saved dir[True]')
    argv = FLAGS(argv)

    srcfile = FLAGS.srcfile
    dstfile = FLAGS.dstfile

    savedir = os.path.abspath(FLAGS.savedir)
    savedir = utils.get_dir(savedir)

    if FLAGS.rewrite:
        utils.clear_dir(savedir)

    raw_json_data = read_json(srcfile)
    if not dstfile:
        srcname, srcext = os.path.splitext(srcfile)
        dstfile = '{:s}-{:02d}-{:02d}{:s}'.format(srcname, FLAGS.min_len,
                                                  FLAGS.max_len, '.txt')
    download(raw_json_data,
             save_dir=savedir,
             dstfile=dstfile,
             max_len=FLAGS.max_len,
             min_len=FLAGS.min_len)
Example no. 17
def SlowMovie(vid_in_path,
              slow_factor=2,
              continuous_fine_tuning=False,
              tmp_dir='./slowed_movie_frames/',
              cpu_max_size=320):
    if np.log2(slow_factor) % 1 != 0:
        raise ValueError("Slow factor must be a power of 2!")

    def write_frame(frame, frame_no):
        out_path = os.path.join(tmp_dir, '{0:09d}.bmp'.format(frame_no))
        assert (not os.path.exists(out_path))
        cv2.imwrite(out_path, frame)

    def recursive_predict_and_write(f0, f2, frame0_no, frame_diff):
        #Interpolate the middle frame
        with torch.no_grad():
            f1 = net(f0, f2)['output_im']
        offset = frame_diff // 2
        #Recursively predict the extra frames
        if frame_diff > 2:
            recursive_predict_and_write(f0, f1, frame0_no, offset)
            recursive_predict_and_write(f1, f2, frame0_no + offset, offset)
        #Write the predicted frame
        write_frame(fh.tensor_to_numpy_bgr(f1[0]), frame0_no + offset)

    cap = cv2.VideoCapture(vid_in_path)
    if cap is None or not cap.isOpened():
        raise RuntimeError('Unable to open video: ',
                           os.path.abspath(vid_in_path))
    vid_dir, vid_in_name = os.path.split(vid_in_path)
    vid_out_name = os.path.splitext(vid_in_name)[0] + '_{0}x_slow.mp4'.format(
        slow_factor)
    vid_out_path = os.path.join(vid_dir, vid_out_name)
    assert (not os.path.exists(vid_out_path))

    #Total number of frames
    n_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

    real_frames = Queue()

    if not os.path.exists(tmp_dir):
        os.mkdir(tmp_dir)
    utils.clear_dir(tmp_dir)
    fh = utils.FrameHandler(None)

    def compute_inbetween_frames(real_frames, idx):
        frame0_no, frame0 = real_frames.queue[idx]
        frame1_no, frame1 = real_frames.queue[idx + 1]
        #Convert the frames to tensors with minibatch size 1
        frame0 = default_collate([frame0])
        frame1 = default_collate([frame1])
        recursive_predict_and_write(frame0, frame1, frame0_no * slow_factor,
                                    slow_factor)

    video_long_edge = max(cap.get(cv2.CAP_PROP_FRAME_WIDTH),
                          cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    if not torch.cuda.is_available() and video_long_edge > cpu_max_size:
        print(
            "Because we are using CPU, all images will be resized to {0} on the long edge for speed."
            .format(cpu_max_size))
        scale = cpu_max_size / video_long_edge

    for frame_num in tqdm(range(n_frames)):
        ret, frame = cap.read()

        if not torch.cuda.is_available() and video_long_edge > cpu_max_size:
            frame = cv2.resize(frame, (0, 0), fx=scale, fy=scale)

        assert (ret)
        write_frame(frame, frame_num * slow_factor)
        real_frames.put((frame_num, fh.bgr_to_tensor(frame)))

        if real_frames.qsize() == 4:
            #We have four real frames in the queue, which lets us finetune if we want
            if continuous_fine_tuning:
                net.cft.finetune_4(real_frames)
            #Compute the intermediate frames between the two middle frames in the queue
            compute_inbetween_frames(real_frames, 1)

            #In the very beginning and end of our video, we need to predict the first and last frame pair, respectively
            if real_frames.queue[0][0] == 0:
                compute_inbetween_frames(real_frames, 0)
            if real_frames.queue[3][0] == n_frames - 1:
                compute_inbetween_frames(real_frames, 2)
            real_frames.get()

    fps = cap.get(cv2.CAP_PROP_FPS)
    ffmpeg_command = 'ffmpeg -f image2 -r {fps} -i "{tmp}%09d.bmp" -c h264 -crf 17 -y "{vid_out_path}"'.format(
        fps=fps, tmp=tmp_dir, vid_out_path=vid_out_path)
    print('Running', ffmpeg_command)
    print(
        "if it doesn't generate a movie you probably don't have ffmpeg installed."
    )
    os.system(ffmpeg_command)
Example no. 18
def analysis_pipeline():
    Config.prepare_env()

    # 1. find relevant docs
    x = input("Are you sure you want to [find relevant docs]? (y/n)")
    logger.debug("Step 1. user input: {}".format(x))
    if x == "y" or x == "Y":
        logger.info(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
        logger.info("Step 1. [find relevant docs]")
        data = utils.load_csv(Config.corpus_file_csv, "title",
                              "content")[:5000]
        logger.info("csv row num: {:,}".format(len(data)))
        rel_doc = RelevantDoc(data)
        rel_doc.find_relevant_texts(
            output_file="output/relevant/relevant_docs.csv")

    # 2. cluster docs
    x = input("Are you sure you want to [cluster documents]? (y/n)")
    logger.debug("user input: {}".format(x))
    if x == "y" or x == "Y":
        alg = input(
            "Which cluster method you want to use a:[kmeans], b:[dbscan] ? (a/b)"
        )
        logger.debug("Step 2. user input: {}".format(alg))
        alg_dic = {"a": "kmeans", "b": "dbscan"}
        if alg in alg_dic:
            alg = alg_dic[alg]
        else:
            raise ValueError("Invalid cluster method choice: {}".format(alg))
        logger.info(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
        logger.info("Step 2. [cluster docs]")
        data = utils.load_csv("output/relevant/relevant_docs.csv", "score",
                              "text")
        utils.clear_dir(Config.cluster_dir)
        cluster = Cluster(data)
        cluster.cluster_docs(alg)

    # 3. text summarization
    x = input("Are you sure you want to [summarize documents]? (y/n)")
    logger.debug("Step 3. user input: {}".format(x))
    if x == "y" or x == "Y":
        logger.info(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
        logger.info("Step 3. [summary docs]")
        x = input(
            "Use a:[original documents] or b:[cutted doc pieces (shorter)]? (a/b)"
        )
        utils.clear_dir(Config.summary_dir)
        root = Config.cluster_dir
        des_dir = Config.summary_dir
        for f in sorted(os.listdir(root)):
            if os.path.isfile(os.path.join(root, f)):
                if not f.endswith(".out"):
                    continue
                src_file = os.path.join(root, f)
                out_file = os.path.join(des_dir, f[:f.rfind(".out")] + ".sum")
                summary_model = Summary()
                if x == "b" or x == "B":
                    summary_model.split_long_docs(src_file)
                    summary_model.summary_docs(src_file=src_file + ".split",
                                               output_file=out_file)
                else:
                    summary_model.summary_docs(src_file=src_file,
                                               output_file=out_file)
        logger.info("summaries for each cluster are saved in dir: {}".format(
            Config.summary_dir))
        logger.info("finish summary all.")

    Config.release_resources()
    logger.info("finish pipeline.")
Example no. 19
            cfg["corpus"]["emb_dim"],
            cfg["model"]["layers"],
        )


if __name__ == "__main__":
    args = parser.parse_args(const.TRAIN_MODE)

    assert args.config

    with open(args.config, "r") as file:
        cfg = yaml.safe_load(file)

    out = Path(args.out)
    if out.is_dir():
        utils.clear_dir(out)
    else:
        out.mkdir(parents=True, exist_ok=True)

    train = Path(args.train)
    dev = Path(args.dev)

    # tensorflow.config.experimental.list_physical_devices('CPU'))

    # Instantiating the word embedding model
    vlayer = TextVectorization(
        standardize=None,
        max_tokens=cfg["corpus"]["voc_len"],
        output_mode="int",
        output_sequence_length=cfg["corpus"]["seq_len"],
    )
Example no. 20
def tar_gz(in_file, out_dir, clear=True):
    if clear:
        clear_dir(out_dir)
    sudo_call("tar", ["xaf", path.expanduser(in_file), "-C", path.abspath(out_dir)])
    sudo_call("sync")
Example no. 21
 def test_open_wav(self):
     dir_convert = join(TEST_DIR_RAW, CONVERTED_DIR)
     assert os.listdir(dir_convert), "Empty dir"
     test_wav = join(dir_convert, os.listdir(dir_convert)[0])
     read_wav(test_wav)
     clear_dir(dir_convert)
Example no. 22
async def cache_clear():
    try:
        utils.clear_dir('cache')
    except:
        pass
    return RedirectResponse("/cache")
Example no. 23
    def dispatch(self, obj_response, client_data):

        if 'loaded_custom_wall' in client_data:
            self.__custom_img_data = client_data['loaded_custom_wall']
            automode_files[
                'wallFilePath'] = self.__on_custom_img_loaded_to_canvas()

        if 'loaded_custom_mask' in client_data:
            self.__custom_img_data = client_data['loaded_custom_mask']
            automode_files[
                'maskFilePath'] = self.__on_custom_img_loaded_to_canvas()

        if 'loaded_custom_sticker' in client_data:
            self.__custom_img_data = client_data['loaded_custom_sticker']
            automode_files[
                'stickerFilePath'] = self.__on_custom_img_loaded_to_canvas()

        if 'loaded_wall_gallery' in client_data:
            self.__wall_gallery_manager.reload_walls_and_masks_gallery(
                obj_response)

        if 'loaded_sticker_gallery' in client_data:
            self.__sticker_gallery_manager.reload_stickers_gallery(
                obj_response)

        if 'loaded_gallery_wall_file' in client_data:
            self.__wall_gallery_manager.on_input_new_wall_bttn_click(
                obj_response, client_data['loaded_gallery_wall_file'])

        if 'loaded_gallery_mask_file' in client_data:
            self.__wall_gallery_manager.on_input_custom_mask_bttn_click(
                obj_response, client_data['loaded_gallery_mask_file'])

        if 'loaded_gallery_sticker_file' in client_data:
            self.__sticker_gallery_manager.on_input_new_sticker_bttn_click(
                obj_response, client_data['loaded_gallery_sticker_file'])

        if 'clicked_gallery_wall_mask' in client_data:
            self.__wall_gallery_manager.on_wall_gallery_img_click(
                obj_response, client_data['clicked_gallery_wall_mask'])
            automode_files[
                'wallFilePath'] = self.__wall_gallery_manager._clicked_img_path

        if 'clicked_gallery_sticker' in client_data:
            self.__sticker_gallery_manager.on_sticker_gallery_img_click(
                obj_response, client_data['clicked_gallery_sticker'])
            automode_files[
                'stickerFilePath'] = self.__sticker_gallery_manager._clicked_img_path

        if 'delGalleryImg' in client_data:
            self.__on_delete_bttn_pressed(client_data['delGalleryImg'])

        if 'automode_settings' in client_data:
            automode_settings['sticker_center'] = client_data[
                'automode_settings'][0]
            automode_settings['repeat_x'] = client_data['automode_settings'][1]
            automode_settings['repeat_y'] = client_data['automode_settings'][2]
            automode_settings['opacity'] = client_data['automode_settings'][3]
            self.__auto_mode_manager.response_processed_image(obj_response)

        if 'downloaded' in client_data:
            utils.clear_dir(TMP_FOLDER)
Example no. 24
                    datefmt='%Y-%m-%d %H:%M:%S')

if arcpy.CheckExtension("Spatial") == "Available":
    arcpy.AddMessage("Checking out Spatial")
    arcpy.CheckOutExtension("Spatial")
else:
    arcpy.AddError("Unable to get spatial analyst extension")
    arcpy.AddMessage(arcpy.GetMessages(0))
    sys.exit(0)

arcpy.env.workspace = s.TEMP_DIR
arcpy.env.scratchWorkspace = s.TEMP_DIR
arcpy.env.overwriteOutput = True
utils.set_arc_env(s.ECOCOMMUNITIES_FE)

utils.clear_dir(s.TRIAL_DIR)
utils.mkdir(os.path.join(s.INPUT_DIR, 'fire'))
utils.mkdir(os.path.join(s.INPUT_DIR, 'garden'))
utils.mkdir(os.path.join(s.INPUT_DIR, 'pond'))
utils.clear_dir(s.TEMP_DIR)
utils.mkdir(s.TEMP_DIR)

logging.info('creating full extent ecocommunities')
ecocommunities_fe = arcpy.Raster(s.ECOCOMMUNITIES_FE)
# TODO: Is this obsolete? Should Lenape sites not already be in ecocomm grid? - ask Eric
lenape_sites = os.path.join(s.TEMP_DIR, 'lenape_sites.tif')
arcpy.PolygonToRaster_conversion(in_features=s.BUFFER_FE,
                                 value_field='RASTERVALU',
                                 out_rasterdataset=lenape_sites,
                                 cellsize=s.CELL_SIZE)
ecocommunities_fe = arcpy.sa.Con(
Example no. 25
# grads = opt.compute_gradients(loss)
# train_op = opt.apply_gradients(grads, global_step=global_step)
# train_step = opt.apply_gradients(grads)

update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_step = tf.train.AdamOptimizer(lr).minimize(loss,
                                                     global_step=global_step)
    # train_step = tf.train.MomentumOptimizer(lr, momentum=0.9).minimize(loss, global_step=global_step)
# loss = tf.Print(loss, [lr, global_step])

sess = tf.Session()

log_dir = 'local/logs'
checkpoint_dir = 'local/checkpoints'
utils.clear_dir(log_dir)
utils.clear_dir(checkpoint_dir)

saver = tf.train.Saver()

summary_all = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(join(log_dir, 'train'), sess.graph)
# test_writer = tf.summary.FileWriter()

tf.global_variables_initializer().run(session=sess)

step = 0
best_iou = 0
best_epoch = 0
exp_start_time = time.time()
for epoch in range(1, num_epochs + 1):
Example no. 26
logging.basicConfig(
    filename=s.LOGFILE,  # use same log file as initiate_disturbance
    # filemode='w',  # defaults to 'a' for append
    format='%(asctime)s %(levelname)s: %(message)s',
    level=logging.DEBUG,
    datefmt='%Y-%m-%d %H:%M:%S')

if arcpy.CheckExtension("Spatial") == "Available":
    arcpy.AddMessage("Checking out Spatial")
    arcpy.CheckOutExtension("Spatial")
else:
    arcpy.AddError("Unable to checkout spatial analyst extension")
    arcpy.AddMessage(arcpy.GetMessages(0))
    sys.exit(0)

utils.clear_dir(s.TEMP_DIR)
arcpy.env.workspace = s.TEMP_DIR
scratchdir = os.path.join(s.TEMP_DIR, 'scratch')
utils.mkdir(scratchdir)
arcpy.env.scratchWorkspace = scratchdir
# arcpy.env.workspace = os.path.join(s.TEMP_DIR, 'working.gdb')
# arcpy.env.scratchWorkspace = os.path.join(s.TEMP_DIR, 'working.gdb')
arcpy.env.overwriteOutput = True
utils.set_arc_env(s.ecocommunities)

if s.FROMSCRATCH:
    utils.clear_dir(s.OUTPUT_DIR)

utils.mkdir(os.path.join(s.OUTPUT_DIR, 'fire', 'burn_rasters'))
utils.mkdir(os.path.join(s.OUTPUT_DIR, 'garden'))
utils.mkdir(os.path.join(s.OUTPUT_DIR, 'pond'))
Example no. 27
 def __init__(self):
     os.makedirs('preview', exist_ok=True)
     clear_dir('preview')
     config = json.load(open('config.json', 'r'))
     config['url'] = 'file:///' + os.getcwd() + '/preview'
     generate_html(config, 'preview')