def convert_to_ply(cls, root_path, out_path):
        """Convert FacilityDataset to a PLY format that is compatible with
        the Synthia dataset. Assumes the file structure given by the dataset.
        Writes the processed PLY files to `out_path`.
        """

        txtfiles = glob.glob(os.path.join(root_path, '*/*/*.txt'))
        for txtfile in tqdm(txtfiles):
            file_sp = os.path.normpath(txtfile).split(os.path.sep)
            target_path = os.path.join(out_path, file_sp[-3])
            out_file = os.path.join(target_path, file_sp[-2] + '.ply')

            if os.path.exists(out_file):
                print(out_file, 'exists; skipping')
                continue

            annotation, _ = os.path.split(txtfile)
            subclouds = glob.glob(os.path.join(annotation,
                                               'Annotations/*.txt'))
            coords, feats, labels = [], [], []
            for inst, subcloud in enumerate(subclouds):
                # Read the annotation txt file and parse its xyz coordinates
                # and rgb/intensity values.
                xyz, rgbi = cls.read_txt(subcloud)
                _, annotation_subfile = os.path.split(subcloud)
                clsidx = cls.CLASSES.index(annotation_subfile.split('_')[0])

                coords.append(xyz)
                feats.append(rgbi)
                labels.append(np.ones((len(xyz), 1), dtype=np.int32) * clsidx)

            if len(coords) == 0:
                print(txtfile, 'has no annotation files.')
            else:
                # Concat
                coords = np.concatenate(coords, 0)
                feats = np.concatenate(feats, 0)
                labels = np.concatenate(labels, 0)
                inds, collabels = ME.utils.sparse_quantize(
                    coords,
                    feats,
                    labels,
                    return_index=True,
                    ignore_label=255,
                    quantization_size=0.0001  # 0.0001 = 0.1mm; 0.01 would be 1cm
                )
                pointcloud = np.concatenate(
                    (coords[inds], feats[inds], collabels[:, None]), axis=1)

                # Write ply file.
                mkdir_p(target_path)
                save_point_cloud(pointcloud,
                                 out_file,
                                 with_label=True,
                                 verbose=True,
                                 intensity=FacilityDatasetConverter.INTENSITY)
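
For context, a minimal usage sketch: it assumes convert_to_ply is a classmethod of FacilityDatasetConverter (as the INTENSITY reference above suggests), and the two path constants are hypothetical placeholders rather than values from the original project.

FACILITY_IN_PATH = '/data/facility/raw'         # hypothetical raw-dataset root
FACILITY_OUT_PATH = '/data/facility/processed'  # hypothetical output root

# Walks the */*/*.txt files under the input root and writes one quantized,
# labeled PLY file per room into the output root.
FacilityDatasetConverter.convert_to_ply(FACILITY_IN_PATH, FACILITY_OUT_PATH)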
Example 2
def generate_splits(stanford_out_path):
  """Take the preprocessed output path and generate per-area txt split files."""
  split_path = './splits/stanford'
  mkdir_p(split_path)
  for i in range(1, 7):
    curr_path = os.path.join(stanford_out_path, f'Area_{i}')
    files = glob.glob(os.path.join(curr_path, '*.ply'))
    files = [os.path.relpath(full_path, stanford_out_path) for full_path in files]
    out_txt = os.path.join(split_path, f'area{i}.txt')
    with open(out_txt, 'w') as f:
      f.write('\n'.join(files))
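
Several of these examples depend on an mkdir_p helper that is not shown. A minimal sketch of such a helper, assuming Python 3 (where os.makedirs accepts exist_ok):

import os

def mkdir_p(dir_path):
    # Create dir_path and any missing parent directories; do nothing if it
    # already exists, mirroring the `mkdir -p` shell command.
    os.makedirs(dir_path, exist_ok=True)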
Example 3
File: stag.py Project: noah/stag
def init(self, argv):
    mkdir_p(path.join(BASE_PATH, "_output"))
    mkdir_p(path.join(BASE_PATH, "_posts"))
    mkdir_p(path.join(BASE_PATH, "_assets"))
    mkdir_p(path.join(BASE_PATH, "_templates"))
    cp(path.join(STAG_PATH, "_templates", "index.html"), TEMPLATE_PATH)
    cp(path.join(STAG_PATH, "_templates", "archive.html"), TEMPLATE_PATH)
    cp(path.join(STAG_PATH, "_templates", "base.html"), TEMPLATE_PATH)
    cp(path.join(STAG_PATH, "_templates", "post.html"), TEMPLATE_PATH)
    cp(path.join(STAG_PATH, "_templates", "post.skel"), TEMPLATE_PATH)
    cp(path.join(STAG_PATH, "stag.default.cfg"), path.join(BASE_PATH, "stag.cfg"))
    # Create an empty ga.js placeholder; close the handle so it is not leaked.
    open(path.join(TEMPLATE_PATH, "ga.js"), 'w').close()
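
The cp helper used here is likewise external to the snippet. A plausible minimal implementation wrapping the standard library (an assumption, not the project's actual code):

import shutil

def cp(src, dst):
    # Copy src to dst; when dst is a directory, the file keeps its basename,
    # matching the behavior of the `cp` shell command.
    shutil.copy(src, dst)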
Example 5
def save_html_to_file(args, selections, all_urls, headers):

    sub_idx = 0
    prob_type_set = []
    counter_video = 1

    for selected_course, selected_sections in selections.items():
        coursename = directory_name(selected_course.name)

        for selected_section in selected_sections:
            section_dirname = "%02d-%s" % (selected_section.position,
                                           selected_section.name)
            target_dir = os.path.join(args.html_dir, coursename,
                                      clean_filename(section_dirname))
            mkdir_p(target_dir)

            for subsection in selected_section.subsections:

                if subsection.name is None:
                    subsection.name = 'Untitled'
                target_subdir = os.path.join(
                    target_dir,
                    str(sub_idx).zfill(3) + '-' +
                    clean_filename(subsection.name))
                mkdir_p(target_subdir)
                logging.info('url: ' + str(all_urls[sub_idx]) +
                             ', subsection: ' + str(sub_idx).zfill(3) + '-' +
                             str(subsection.name))
                page = get_page_contents(str(all_urls[sub_idx]), headers)
                soup = BeautifulSoup(page, "html.parser")

                # div containing all units (seq_contents_#)
                main_content = soup.find("div", {"class": "container"})

                units = crawl_units(main_content)
                counter = 0
                sub_idx = sub_idx + 1

                for unit in units:

                    filename_template = "seq_contents_" + str(
                        counter) + ".html"
                    filename = os.path.join(target_subdir, filename_template)

                    filename_template_txt = "seq_contents_" + str(
                        counter) + ".txt"
                    filename_txt = os.path.join(target_subdir,
                                                filename_template_txt)

                    filename_template_prob_txt = "seq_contents_" + str(
                        counter) + "_prob.txt"
                    filename_prob_txt = os.path.join(
                        target_subdir, filename_template_prob_txt)

                    filename_template_video_json = "seq_contents_" + str(
                        counter) + "_vdo.json"
                    filename_video_json = os.path.join(
                        target_subdir, filename_template_video_json)

                    logging.info('path: ' + str(target_subdir) +
                                 ', filename: ' + str(filename))

                    try:
                        file_ = sys.stdout if filename == '-' else codecs.open(
                            filename, 'w', 'utf-8')
                    except IOError as exc:
                        f = open('downloading_error_report.txt', 'a')
                        text = 'External command error ignored: ' + str(
                            exc) + '\n\n'
                        f.write(text)
                        f.close()
                        file_ = sys.stdout if filename == '-' else codecs.open(
                            filename_template, 'w', 'utf-8')

                    file_.writelines(unit.prettify(formatter=None))
                    file_.close()

                    soup = unit.prettify(formatter=None)
                    soup = BeautifulSoup(soup, "html.parser")

                    # select only html components (disregard video, problem)
                    html_flag = soup.findAll("div",
                                             {"data-block-type": "html"})
                    if len(html_flag) > 0:

                        # create the file only when an html component exists
                        file_txt = sys.stdout if filename_txt == '-' else codecs.open(
                            filename_txt, 'w', 'utf-8')
                        text = ""
                        for soup_component in html_flag:
                            for s in soup_component.findAll([
                                    'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'p',
                                    'li'
                            ]):
                                text += s.getText() + " "

                        file_txt.writelines(text)
                        file_txt.close()
                        print(filename_txt + ' of text component was created')

                    # select only problem components (disregard video, text)
                    prob_txt, prob_types = extract_problem_comp(soup)

                    if len(prob_txt) > 0:
                        file_prob_txt = sys.stdout if filename_prob_txt == '-' else codecs.open(
                            filename_prob_txt, 'w', 'utf-8')
                        for prob_type in prob_types:
                            prob_type_set.append(prob_type + ' \n')

                        file_prob_txt.writelines(prob_txt)
                        file_prob_txt.close()
                        print(filename_prob_txt +
                              ' of problem component was created')

                    tmp_video_dict = extract_video_component(
                        args, coursename, headers, soup,
                        clean_filename(section_dirname),
                        clean_filename(subsection.name),
                        "seq_contents_" + str(counter))
                    if len(tmp_video_dict) > 0:
                        file_video_json = sys.stdout if filename_video_json == '-' else codecs.open(
                            filename_video_json, 'w', 'utf-8')
                        video_unit_dict = dict()
                        for vd in tmp_video_dict:
                            video_unit_dict.update({
                                "video_block_" + str(counter_video).zfill(2):
                                vd
                            })
                            counter_video += 1
                        video_dict2json = json.dumps(video_unit_dict,
                                                     sort_keys=False,
                                                     indent=4,
                                                     separators=(',', ': '))
                        file_video_json.writelines(video_dict2json)
                        file_video_json.close()
                        print(filename_video_json +
                              ' of video component was created')
                    counter += 1

    save_urls_to_file(
        prob_type_set,
        os.path.join(args.html_dir, coursename, "all_prob_type.txt"))
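
save_urls_to_file is defined elsewhere; a plausible minimal version that writes each collected entry on its own line (an assumption about its behavior; the prob_type entries collected above already end in a newline):

def save_urls_to_file(lines, file_path):
    # Write each entry verbatim; callers append their own newlines.
    with open(file_path, 'w', encoding='utf-8') as f:
        f.writelines(lines)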
Example 6
def save_html_to_file(args, selections, all_urls, headers):
    sub_idx = 0
    prob_type_set = []
    counter_video = 1
    counter_unit = 1
    txt_id = 1
    prob_id = 1
    video_id = 1
    comp_id = 1
    tmp_course_strut = dict()
    txt_dict_ls = dict()
    prob_dict_ls = dict()
    comp_dict_ls = dict()
    video_dict_ls = dict()
    for selected_course, selected_sections in selections.items():
        coursename = directory_name(selected_course.name)
        sourcepath = os.path.join(args.html_dir, coursename,
                                  'source_html_file')
        mkdir_p(sourcepath)
        #filename_meta = os.path.join(sourcepath, 'html_metadata.csv')

        metasec_ls = [[], [], [], []]
        for selected_section in selected_sections:
            section_dirname = "%02d-%s" % (selected_section.position,
                                           selected_section.name)
            tmp_course_strut['section'] = section_dirname

            for subsection in selected_section.subsections:

                if subsection.name is None:
                    subsection.name = 'Untitled'

                tmp_course_strut['subsection'] = subsection.name
                #logging.info('url: '+ str(all_urls[sub_idx]) )
                print(all_urls[sub_idx])
                page = get_page_contents(str(all_urls[sub_idx]), headers)
                soup = BeautifulSoup(page, "html.parser")

                # div containing all units (seq_contents_#)
                main_content = soup.find("div", {"class": "container"})

                units = crawl_units(main_content)

                sub_idx = sub_idx + 1

                for unit in units:

                    filename_template = str(counter_unit).zfill(4) + ".html"
                    filename = os.path.join(args.html_dir, coursename,
                                            'source_html_file',
                                            filename_template)

                    try:
                        file_ = sys.stdout if filename == '-' else codecs.open(
                            filename, 'w', 'utf-8')
                    except IOError as exc:
                        f = open('downloading_error_report.txt', 'a')
                        text = 'External command error ignored: ' + str(
                            exc) + '\n\n'
                        f.write(text)
                        f.close()
                        file_ = sys.stdout if filename == '-' else codecs.open(
                            filename_template, 'w', 'utf-8')

                    file_.writelines(unit.prettify(formatter=None))
                    file_.close()

                    soup = unit.prettify(formatter=None)
                    soup = BeautifulSoup(soup, "html.parser")

                    # soup.find may return None when the title is missing, so
                    # guard before calling getText().
                    cur_unit_tag = soup.find("h2",
                                             {"class": "hd hd-2 unit-title"})
                    cur_unit = ('Untitled' if cur_unit_tag is None else
                                cur_unit_tag.getText())
                    tmp_course_strut['unit'] = cur_unit

                    logging.info('section: ' + tmp_course_strut['section'])
                    logging.info('     subsection: ' +
                                 tmp_course_strut['subsection'])
                    logging.info('                unit: ' +
                                 tmp_course_strut['unit'])

                    metasec_ls[0].append(tmp_course_strut['section'])
                    metasec_ls[1].append(tmp_course_strut['subsection'])
                    metasec_ls[2].append(tmp_course_strut['unit'])
                    metasec_ls[3].append(filename_template)

                    # select only html components (disregard video, problem)
                    html_flag = soup.findAll("div",
                                             {"data-block-type": "html"})
                    if len(html_flag) > 0:

                        # collect text only when an html component exists
                        text = ""
                        for soup_component in html_flag:
                            for s in soup_component.findAll([
                                    'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'p',
                                    'li'
                            ]):
                                text += s.getText() + " "

                        tmp_dict = {
                            'text_block_' + str(txt_id).zfill(4): {
                                'section': tmp_course_strut['section'],
                                'subsection': tmp_course_strut['subsection'],
                                'unit': tmp_course_strut['unit'],
                                'content': text
                            }
                        }
                        txt_dict_ls.update(tmp_dict)
                        txt_id += 1

                    # select only problem components (disregard video, text)
                    prob_txt, prob_types = extract_problem_comp(soup)

                    if len(prob_txt) > 0:
                        for prob_type in prob_types:
                            prob_type_set.append(prob_type + ' \n')

                        tmp_dict = {
                            'quiz_block_' + str(prob_id).zfill(4): {
                                'section': tmp_course_strut['section'],
                                'subsection': tmp_course_strut['subsection'],
                                'unit': tmp_course_strut['unit'],
                                'content': prob_txt
                            }
                        }
                        prob_dict_ls.update(tmp_dict)
                        #print(tmp_dict)
                        prob_id += 1

                    tmp_video_dict = extract_video_component(
                        args, coursename, headers, soup,
                        tmp_course_strut['section'],
                        tmp_course_strut['subsection'],
                        tmp_course_strut['unit'])
                    if len(tmp_video_dict) > 0:
                        video_unit_dict = dict()
                        for vd in tmp_video_dict:
                            video_unit_dict.update({
                                "video_block_" + str(counter_video).zfill(4):
                                vd
                            })
                            counter_video += 1

                        video_dict_ls.update(video_unit_dict)
                        video_id += 1

                    print(video_dict_ls)

                    counter_unit += 1

                    set_comp_types = soup.findAll("div",
                                                  {"data-block-type": True})
                    for comp_type in set_comp_types:
                        if comp_type['data-block-type'] in [
                                'html', 'video', 'problem'
                        ]:
                            comp_dict = {
                                str(comp_id).zfill(4) + '_' + comp_type['data-block-type']:
                                {
                                    'section': tmp_course_strut['section'],
                                    'subsection':
                                    tmp_course_strut['subsection'],
                                    'unit': tmp_course_strut['unit'],
                                    'type': comp_type['data-block-type']
                                }
                            }
                            comp_dict_ls.update(comp_dict)
                            comp_id += 1

    txt_dict2json = json.dumps(txt_dict_ls,
                               sort_keys=True,
                               indent=4,
                               separators=(',', ': '))
    prob_dict2json = json.dumps(prob_dict_ls,
                                sort_keys=True,
                                indent=4,
                                separators=(',', ': '))
    video_dict2json = json.dumps(video_dict_ls,
                                 sort_keys=True,
                                 indent=4,
                                 separators=(',', ': '))
    comp_dict2json = json.dumps(comp_dict_ls,
                                sort_keys=True,
                                indent=4,
                                separators=(',', ': '))

    with open(os.path.join(args.html_dir, coursename, 'all_textcomp.json'),
              'w',
              encoding='utf-8') as f:
        f.write(txt_dict2json)

    with open(os.path.join(args.html_dir, coursename, 'all_probcomp.json'),
              'w',
              encoding='utf-8') as f:
        f.write(prob_dict2json)

    with open(os.path.join(args.html_dir, coursename, 'all_videocomp.json'),
              'w',
              encoding='utf-8') as f:
        f.write(video_dict2json)

    with open(os.path.join(args.html_dir, coursename, 'all_comp.json'),
              'w',
              encoding='utf-8') as f:
        f.write(comp_dict2json)

    metafile_dict = {
        'section': metasec_ls[0],
        'subsection': metasec_ls[1],
        'unit': metasec_ls[2],
        'htmlfile': metasec_ls[3]
    }
    df = pd.DataFrame.from_dict(metafile_dict)
    df.to_csv(
        os.path.join(args.html_dir, coursename, 'source_html_file',
                     'metadata.csv'))

    save_urls_to_file(
        prob_type_set,
        os.path.join(args.html_dir, coursename, "all_prob_type.txt"))
    make_tarfile(os.path.join(args.html_dir, coursename, 'sourcefile.tar.gz'),
                 os.path.join(args.html_dir, coursename, 'source_html_file'))
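
make_tarfile is also defined elsewhere; a common implementation using the standard tarfile module (a sketch of what the helper presumably does):

import os
import tarfile

def make_tarfile(output_filename, source_dir):
    # Recursively archive source_dir into a gzip-compressed tarball, keeping
    # the directory's basename as the top-level entry in the archive.
    with tarfile.open(output_filename, "w:gz") as tar:
        tar.add(source_dir, arcname=os.path.basename(source_dir))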