Example #1
def render_line2(filename, frame_rate=60):
    if is_animation(parse_tags(filename)):
        data = []
        def get_line_data(filename, frame):
            filename = filename.replace('0000', '%04d' % frame)
            data_x = np.load(filename)
            data_y = np.load(filename.replace(',' + X_TAG, ',' + Y_TAG))
            return (data_x, data_y)
        def update_lines(frame, line):
            x, y = data[frame]
            line.set_data(x, y)
            return line,
        dirname = os.path.dirname(filename)
        basename = os.path.basename(filename).replace('0000', '[0-9][0-9][0-9][0-9]')
        seq = utils.get_all_files(dirname, [basename])
        seq.sort()
        if len(seq) == 0:
            return

        data_x, data_y = get_line_data(seq[0], 0)
        has_bbox = False
        for frame in range(len(seq)):
            data_x, data_y = get_line_data(seq[frame], frame)
            if len(data_x) > 0 and len(data_y) > 0:
                if has_bbox:
                    xmin = min(xmin, data_x.min())
                    xmax = max(xmax, data_x.max())
                    ymin = min(ymin, data_y.min())
                    ymax = max(ymax, data_y.max())
                else:
                    xmin = data_x.min()
                    xmax = data_x.max()
                    ymin = data_y.min()
                    ymax = data_y.max()
                    has_bbox = True
            data.append((data_x, data_y))

        fig, ax = plt.subplots()
        x, y = get_line_data(filename, 0)

        if has_bbox:
            xmid = (xmax + xmin) / 2.0
            ymid = (ymax + ymin) / 2.0
            new_xmin = xmid - 1.25 * (xmid - xmin)
            new_xmax = xmid + 1.25 * (xmax - xmid)
            new_ymin = ymid - 1.25 * (ymid - ymin)
            new_ymax = ymid + 1.25 * (ymax - ymid)
            ax.set_xlim(new_xmin, new_xmax)
            ax.set_ylim(new_ymin, new_ymax)
        ax.set_aspect('equal')
        line, = ax.plot(x, y, lw=2, marker='o', markersize=3)
        anim = animation.FuncAnimation(fig, update_lines, len(seq), fargs=(line,), interval=60, blit=False)
        output_filename = get_output_movie_filename(filename.replace(',' + X_TAG, ''))
        anim.save(output_filename, fps=frame_rate, bitrate=5000, writer=video_writer, extra_args=video_extra_args)
        plt.close(fig)
        print ('Rendered <%s>' % output_filename)
    else:
        output_filename = get_output_bitmap_filename(filename.replace(',' + X_TAG, ''))
        render_still_line2(filename, filename.replace(',' + X_TAG, ',' + Y_TAG), output_filename)
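Every example in this collection calls a project-specific get_all_files helper whose implementation is not shown, and its signature varies from project to project (a bare directory, a list of glob patterns, an extension string or tuple, keyword arguments such as any= or isdir=). As a point of reference only, here is a minimal sketch of what the variant used above, get_all_files(dirname, patterns), could look like, assuming it walks the directory tree recursively and filters base names with fnmatch-style patterns:

import fnmatch
import os

def get_all_files(dirname, patterns=None):
    # Hypothetical sketch, not the actual utils implementation of any project shown here.
    # Recursively collect file paths under `dirname`; if `patterns` is given, keep only
    # files whose base name matches at least one fnmatch-style pattern.
    matched = []
    for root, _dirs, files in os.walk(dirname):
        for name in files:
            if patterns is None or any(fnmatch.fnmatch(name, p) for p in patterns):
                matched.append(os.path.join(root, name))
    return matched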
Example #2
def read_action(action_path):
    action_name = get_action_name(action_path)
    category = get_category(action_name)
    person = get_person(action_name)
    all_files = utils.get_all_files(action_path)
    all_files = utils.append_path(action_path + "/", all_files)
    frames = utils.read_images(all_files)
    return Action(category, person, frames)
Example #3
def load_data(path, batch_size=25):
    all_files = utils.get_all_files(path)
    all_files = utils.append_path(path, all_files)
    images = utils.read_images(all_files)
    images = utils.flatten_images(images)
    images = list(map(utils.normalize, images))
    images = np.array(images)
    n_batches = get_number_of_batches(batch_size, len(images))
    def get_batch(i):
        return images[i * batch_size: (i + 1) * batch_size]
    batches = map(get_batch, range(n_batches))
    batches = [np.array(batch) for batch in batches]
    print("Dataset loaded")
    return np.array(batches)
Example #4
def main():
    """Hook spot for the console script."""

    parser = optparse.OptionParser()
    parser.add_option('-p', '--paths', action='store_true',
                      help='Preserve path structure of original files')

    parser.add_option('-d', '--directory', action='store', type='string',
                      dest='outdir', default='.',
                      help='The output directory that the rendered files should go to.')

    parser.add_option('-w', '--watch', action='store_true',
                      help='Watch original files and re-generate documentation on changes')

    parser.add_option('-a', '--all', action='store_true',
                      help='Get all files from subfolders')

    opts, sources = parser.parse_args()

    if not sources:
        return

    filepath = os.path.dirname(sources[0])
    start, filetype = os.path.splitext(sources[0])

    if start.endswith('*'):
        return

    start = os.path.dirname(os.path.dirname(os.path.abspath(start)))

    if opts.all:
        sources = [i for i in get_all_files(filepath or '.', filetype)]

    global SOURCES
    SOURCES = sorted([Source(name, start) for name in sources])

    process(SOURCES, outdir=opts.outdir, preserve_paths=opts.paths)

    # If the -w / --watch option was present, monitor the source directories
    # for changes and re-generate documentation for source files whenever they
    # are modified.
    if opts.watch:
        try:
            import watchdog.events
            import watchdog.observers
        except ImportError:
            sys.exit('The -w/--watch option requires the watchdog package.')

        monitor(sources, opts)
Example #5
def SPVideoFromSequence(context):
    args = context.args
    referenceFile = [None]  # fallback so the call below does not fail when no reference folder is given
    if args.referenceFolder:
        referenceFile = get_all_files(args.referenceFolder)

    VideoEd.video_from_sequence(
        args.inputData,
        os.path.join(args.outputData, args.fileName),
        referenceFile[0],
        args.ext,
        args.fps,
        args.bitrate,
        args.lossless,
    )

    return args.outputData
Example #6
def main(argv=None):
    args = docopt(__doc__, argv=argv)

    pictures = utils.get_all_files(args['<input>'])
    destination = args['<destination>']
    dry_run = args['--dry-run']
    move = args['--move']
    verbose = args['--verbose']

    groups = group_pictures(pictures)
    groups = group_consecutive_days(groups)

    for group, pics in groups.iteritems():
        print group, len(pics)

    send_to_destination(destination, groups, move=move, dry_run=dry_run, verbose=verbose)
Example #7
def bbai(dirs, outfile='./data_info', model=''):

    files = utils.get_all_files(dirs)
    if model == 'fametrecafe':
        model = '/home/pgrespan/trained/VGG16_2020-04-21_11-01/VGG16_13_0.88753_0.86502.h5'
    gen = LSTGenerator(h5files=files,
                       feature="energy",
                       shuffle=False,
                       intensity=None,
                       leakage2_intensity=None,
                       class_model=model)
    try:
        gen.get_all_info().to_pickle(outfile + '.pkl')
    except Exception:
        print('Cannot find the output folder, it probably does not exist!')

    return gen.indexes
Example #8
def test_classifiers():
    model_dir= 'models/'

    test_file = 'feature/clean_features.csv'
    result_dir = 'result/'
    result_file = 'result.csv'
    model_files = get_all_files(model_dir)

    for mfile  in model_files:
        model_file = os.path.join(model_dir,mfile) #get the path of the model file

        with open(model_file, 'rb') as fp:
            trained_model = cPickle.load(fp)  # load trained model
        data = np.genfromtxt(test_file, delimiter=',') # read the test file
        row, col = data.shape
        x = data[:, 1:col - 1]  # get data without the label information
        y_true = data[:, col - 1] # get the label information
        y_pred = trained_model.predict(x) # perform prediction

        acc = accuracy_score(y_true, y_pred)

        f1 = f1_score(y_true, y_pred, average=None)
        prc = precision_score(y_true, y_pred, average=None)
        rcl = recall_score(y_true, y_pred, average=None)


        if not os.path.exists(result_dir):
            os.makedirs(result_dir)
        res_file = os.path.join(result_dir,result_file)

        with open(res_file,'w') as fp:
            print 'writing individual results...'
            fp.write("acc :: " + str(acc))
            fp.write('\n\n')

            fp.write('f1 :: ' + ','.join(str(x) for x in f1))
            fp.write('\n\n')

            fp.write("precision :: " + ','.join(
                str(x) for x in prc))
            fp.write('\n\n')

            fp.write("recall :: " + ','.join(str(x) for x in rcl))
            fp.write('\n\n\n')
Example #9
def main():
    include_dir = os.path.join(dirname, '../include/jet')
    filenames = utils.get_all_files(include_dir, ['*.h'])
    filenames.sort()
    header = os.path.join(dirname, '../include/jet/jet.h')
    with open(header, 'w') as header_file:
        header_file.write("""// Copyright (c) 2017 Doyub Kim

// I am making my contributions/submissions to this project solely in my
// personal capacity and am not conveying any rights to any intellectual
// property of any third parties.\n
""")
        header_file.write('#ifndef INCLUDE_JET_JET_H_\n')
        header_file.write('#define INCLUDE_JET_JET_H_\n')
        for filename in filenames:
            if not filename.endswith('-inl.h'):
                line = '#include <jet/%s>\n' % os.path.basename(filename)
                header_file.write(line)
        header_file.write('#endif  // INCLUDE_JET_JET_H_\n')
Example #10
def main():
    include_dir = os.path.join(dirname, '../include/jet')
    filenames = utils.get_all_files(include_dir, ['*.h'])
    filenames.sort()
    header = os.path.join(dirname, '../include/jet/jet.h')
    with open(header, 'w') as header_file:
        header_file.write("""// Copyright (c) 2017 Doyub Kim

// I am making my contributions/submissions to this project solely in my
// personal capacity and am not conveying any rights to any intellectual
// property of any third parties.\n
""")
        header_file.write('#ifndef INCLUDE_JET_JET_H_\n')
        header_file.write('#define INCLUDE_JET_JET_H_\n')
        for filename in filenames:
            if not filename.endswith('-inl.h'):
                line = '#include <jet/%s>\n' % os.path.basename(filename)
                header_file.write(line)
        header_file.write('#endif  // INCLUDE_JET_JET_H_\n')
Example #11
    def __init__(self,
                 batch_size,
                 num_vid_frames=4,
                 framerate=30,
                 framesize=256,
                 samplerate=16000,
                 max_vid_frames=100,
                 noise_std=0.01,
                 center_fft=True,
                 use_polar=True,
                 shuffle_files=True,
                 data_path="./data/raw"):

        assert batch_size > 1

        self.attention_extractor = VideoAttention()

        self.max_vid_frames = max_vid_frames
        self.batch_size = batch_size
        self.num_vid_frames = num_vid_frames
        self.framerate = framerate
        self.framesize = framesize
        self.samplerate = samplerate

        self.noise_std = noise_std

        self.normalize_input_fft = False

        self.fft_len = int(
            (self.num_vid_frames / self.framerate) * self.samplerate)

        self.all_vids = utils.get_all_files(data_path, "mp4")
        print(f'number of videos found: {len(self.all_vids)}')

        if shuffle_files:
            random.shuffle(self.all_vids)

        # self.current_av_pair = self.load_example_pair(self.all_vids[0])

        self.example_idx = 0
        self.center_fft = center_fft
        self.use_polar = use_polar
Example #12
def main(argv=None):
    args = docopt(__doc__, argv=argv)

    pictures = utils.get_all_files(args['<input>'])
    destination = args['<destination>']
    dry_run = args['--dry-run']
    move = args['--move']
    verbose = args['--verbose']

    groups = group_pictures(pictures)
    groups = group_consecutive_days(groups)

    for group, pics in groups.iteritems():
        print group, len(pics)

    send_to_destination(destination,
                        groups,
                        move=move,
                        dry_run=dry_run,
                        verbose=verbose)
Example #13
def main():
    include_dir = os.path.join(dirname, "../include/blending")
    filenames = utils.get_all_files(include_dir, ["*.h"])
    filenames.sort()
    header = os.path.join(dirname, "../include/blending/blending.h")
    header_tmp = header + ".tmp"
    with open(header_tmp, "w") as header_file:
        header_file.write("""// Copyright (c) 2018 loyave
""")
        header_file.write("#ifndef INCLUDE_BLENDING_BLENDING_H_\n")
        header_file.write("#define INCLUDE_BLENDING_BLENDING_H_\n")
        for filename in filenames:
            if not filename.endswith("-inl.h"):
                line = "#include <blending/%s>\n" % os.path.basename(filename)
                header_file.write(line)
        header_file.write("#endif  // INCLUDE_BLENDING_BLENDING_H_\n")
    if not filecmp.cmp(header, header_tmp):
        shutil.move(header_tmp, header)
    else:
        os.remove(header_tmp)
Example #14
def process_all(root, output):
    assert formatted or compact  # module-level output-format flags, defined outside this snippet

    src_files = get_all_files(root, '.json')

    for f in tqdm(src_files):
        game = json.load(open(f, 'r'))
        game = process_game(game, f)

        dest_file = f.replace(root, output)
        dirname = os.path.dirname(dest_file)
        if not os.path.exists(dirname):
            os.makedirs(dirname)

        temp_json = tempfile.TemporaryFile('r+')
        json.dump(game, temp_json, indent=4)
        temp_json.seek(0)
        formatted_json = format_json(temp_json)
        with open(dest_file, 'w') as dest:
            dest.write(formatted_json)
Example #15
def main(input, output, emin, emax, log_energy, its, lkg):

    files = get_all_files(input)
    print("Indexing data...")
    generator = DataGeneratorR(files,
                               feature="energy",
                               shuffle=False,
                               intensity=its,
                               emin=emin,
                               emax=emax)
    print("Loading data and printing images...")
    title = "Gamma"
    for i in trange(generator.__len__()):
        images, energies = generator.__getitem__(i)
        #print("Images shape: ", images.shape)
        save_interp_images(charges=images,
                           title=title,
                           energies=energies,
                           start=i,
                           outputdir=output)
    print("Finished!")
Example #16
def render_grid2(filename, frame_rate=60):
    if is_animation(parse_tags(filename)):
        dirname = os.path.dirname(filename)
        basename = os.path.basename(filename).replace('0000', '[0-9][0-9][0-9][0-9]')
        seq = utils.get_all_files(dirname, [basename])
        seq.sort()
        if len(seq) == 0:
            return
        grid_data = np.load(seq[0])
        grid_data = np.flipud(grid_data)
        fig, ax = plt.subplots()
        im = ax.imshow(grid_data, cmap=plt.cm.gray, interpolation='nearest')
        if ISO_TAG in parse_tags(filename):
            plt.contour(grid_data)

        def update_image(i):
            grid_data = np.load(seq[i])
            grid_data = np.flipud(grid_data)
            im.set_array(grid_data)
            if ISO_TAG in parse_tags(filename):
                plt.contour(grid_data)
            return im

        output_filename = get_output_movie_filename(filename.replace(',' + X_TAG, ''))
        anim = animation.FuncAnimation(fig, update_image, frames=len(seq), interval=60, blit=False)
        anim.save(output_filename, fps=frame_rate, bitrate=5000, writer=video_writer, extra_args=video_extra_args)
        plt.close(fig)
        print ('Rendered <%s>' % output_filename)
    else:
        tags = parse_tags(filename)
        output_filename = get_output_bitmap_filename(filename)
        if X_TAG in tags:
            grid_data_u = np.load(filename)
            grid_data_v = np.load(filename.replace(X_TAG, Y_TAG))
            render_still_vector_grid2(filename, filename.replace(X_TAG, Y_TAG), output_filename)
        else:
            has_iso = ISO_TAG in parse_tags(filename)
            render_still_scalar_grid2(filename, output_filename, has_iso=has_iso)
Example #17
def SPConvert(context):
    args = context.args

    video = get_all_files(args.refVideo)[0] if args.refVideo else args.refVideo

    PARSER = cli.FullHelpArgumentParser()
    CONVERT = cli.ConvertArgs(
        PARSER, "convert",
        "Convert a source image to a new one with the face swapped")

    argsTransfer = [
        "--input-dir",
        args.inputData1,
        "--model-dir",
        args.inputModel,
        "--output-dir",
        args.outputData,
        "--color-adjustment",
        args.colorAdjustment,
        "--mask-type",
        args.maskType,
        "--scaling",
        args.scaling,
        "--gpus",
        str(args.__gpu),
    ]

    if args.inputData2:
        argsTransfer += [
            "--alignments",
            os.path.join(args.inputData2, "alignments.json"),
        ]

    ARGUMENTS = PARSER.parse_args(argsTransfer)
    ARGUMENTS.func(ARGUMENTS)

    return args.outputData
Example #18
def tester(folders, mdl, batch_size, atime, workers, intensity):
    h5files = get_all_files(folders)
    random.shuffle(h5files)

    model = load_model(mdl)

    print('Building test generator...')
    test_generator = DataGeneratorC(h5files,
                                    batch_size=batch_size,
                                    arrival_time=atime,
                                    shuffle=False,
                                    intensity=intensity)
    print('Number of test batches: ' + str(len(test_generator)))

    pr_labels = model.predict_generator(generator=test_generator,
                                        steps=None,
                                        max_queue_size=10,
                                        workers=workers,
                                        use_multiprocessing=True,
                                        verbose=1)

    test_idxs = test_generator.get_indexes()  # ground truth labels
    gt_labels = test_idxs[:, 2].reshape(test_idxs[:, 2].shape[0])
    pr_labels = pr_labels.reshape(pr_labels.shape[0])

    # saving predictions
    df = pd.DataFrame()
    df['GroundTruth'] = gt_labels
    df['Predicted'] = pr_labels

    df.to_csv(mdl + '_test.csv', sep=',', index=False, encoding='utf-8')

    p_file = mdl + '_test.csv'

    print('Results saved in ' + p_file)

    return p_file
Example #19
def main(annotation_file, image_dir, chip_dir, output_dir, show_ggnn, infer,
         _run):
    refiner = PolygonRefiner(infer=infer, show_ggnn=show_ggnn)
    if os.path.isfile(annotation_file):
        refiner.process_file(
            annotation_file,
            image_dir,
            chip_dir,
            output_dir,
        )
    else:
        image_dirs = get_all_files(annotation_file, isdir=True, extension="*")
        for image_dir in image_dirs:
            (child_chip_dir, child_output_dir) = [
                os.path.join(x, os.path.basename(image_dir))
                for x in (chip_dir, output_dir)
            ]
            annotation_file = os.path.join(image_dir, "annotations.csv")
            refiner.process_file(
                annotation_file,
                image_dir,
                child_chip_dir,
                child_output_dir,
            )
Example #20
def main():
    include_dir = os.path.join(dirname, "../include/jet")
    filenames = utils.get_all_files(include_dir, ["*.h"])
    filenames.sort()
    header = os.path.join(dirname, "../include/jet/jet.h")
    header_tmp = header + ".tmp"
    with open(header_tmp, "w") as header_file:
        header_file.write("""// Copyright (c) 2018 Doyub Kim

// I am making my contributions/submissions to this project solely in my
// personal capacity and am not conveying any rights to any intellectual
// property of any third parties.\n
""")
        header_file.write("#ifndef INCLUDE_JET_JET_H_\n")
        header_file.write("#define INCLUDE_JET_JET_H_\n")
        for filename in filenames:
            if not filename.endswith("-inl.h"):
                line = "#include <jet/%s>\n" % os.path.basename(filename)
                header_file.write(line)
        header_file.write("#endif  // INCLUDE_JET_JET_H_\n")
    if not filecmp.cmp(header, header_tmp):
        shutil.move(header_tmp, header)
    else:
        os.remove(header_tmp)
Example #21
def main():
    include_dir = os.path.join(dirname, "../include/jet")
    filenames = utils.get_all_files(include_dir, ["*.h"])
    filenames.sort()
    header = os.path.join(dirname, "../include/jet/jet.h")
    header_tmp = header + ".tmp"
    with open(header_tmp, "w") as header_file:
        header_file.write("""// Copyright (c) 2017 Doyub Kim

// I am making my contributions/submissions to this project solely in my
// personal capacity and am not conveying any rights to any intellectual
// property of any third parties.\n
""")
        header_file.write("#ifndef INCLUDE_JET_JET_H_\n")
        header_file.write("#define INCLUDE_JET_JET_H_\n")
        for filename in filenames:
            if not filename.endswith("-inl.h"):
                line = "#include <jet/%s>\n" % os.path.basename(filename)
                header_file.write(line)
        header_file.write("#endif  // INCLUDE_JET_JET_H_\n")
    if not filecmp.cmp(header, header_tmp):
        shutil.move(header_tmp, header)
    else:
        os.remove(header_tmp)
Example #22
    _x = []
    _y = []

    for xyz in xyz_file:   
        _x.append(float(xyz.split()[0]))
        _y.append(float(xyz.split()[1]))

    line.set_data(_x, _y)
    xyz_file.close()

    return line,

# Simulation Path
path = sys.argv[1]
output_name = sys.argv[2] if len(sys.argv) > 2 else 'result.mp4'
files = utils.get_all_files(path, "*.xyz")
size_files = len(files)

# Set up formatting for the movie files
Writer = animation.writers['ffmpeg']
writer = Writer(fps=25, bitrate=1800)

fig1 = plt.figure()

data = []
l, = plt.plot([], [], 'ro')

plt.xlim(0, 1)
plt.ylim(0, 1)
plt.xlabel('x')
plt.title('Simulation Result')
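The snippet above is cut off at both ends: it begins in the middle of the frame-update callback and stops before the animation is actually created and saved. A minimal sketch of the likely missing tail, assuming the callback shown above is named update_line(i, line) and is driven by the files list, fig1, writer, and output_name defined above:

# Assumed continuation (the callback name and argument order are guesses based on the tail shown above):
line_ani = animation.FuncAnimation(fig1, update_line, frames=size_files, fargs=(l,), blit=True)
line_ani.save(output_name, writer=writer)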
Example #23
def main():
    global _threadError

    num_threads = os.cpu_count() * 2
    root_dir = path.join(utils.get_script_folder(), '..')
    docs_dir = path.join(root_dir, 'docs')
    xml_dir = path.join(docs_dir, 'xml')
    html_dir = path.join(docs_dir, 'html')
    mcss_dir = path.join(root_dir, 'extern', 'mcss')
    doxygen = path.join(mcss_dir, 'documentation', 'doxygen.py')

    # delete any leftovers from the previous run
    if 1:
        utils.delete_directory(xml_dir)
        utils.delete_directory(html_dir)

    # run doxygen to generate the xml
    if 1:
        subprocess.check_call(['doxygen', 'Doxyfile'],
                              shell=True,
                              cwd=docs_dir)

    # fix some shit that's broken in the xml
    if 1:
        preprocess_xml(xml_dir)

    # run doxygen.py (m.css) to generate the html
    if 1:
        utils.run_python_script(doxygen, path.join(docs_dir, 'Doxyfile-mcss'),
                                '--no-doxygen')

    # post-process html files
    if 1:
        fixes = [
            CustomTagsFix(),
            SyntaxHighlightingFix(),
            IndexPageFix(),
            ModifiersFix1(),
            ModifiersFix2(),
            ExtDocLinksFix(),
            EnableIfFix(),
            ExternalLinksFix(),
            TemplateTemplateFix()
        ]
        files = [
            path.split(f)
            for f in utils.get_all_files(html_dir, any=('*.html', '*.htm'))
        ]
        if files:
            with futures.ThreadPoolExecutor(
                    max_workers=min(len(files), num_threads)) as executor:
                jobs = {
                    executor.submit(postprocess_file, dir, file, fixes): file
                    for dir, file in files
                }
                for job in futures.as_completed(jobs):
                    if _threadError:
                        executor.shutdown(False)
                        break
                    else:
                        file = jobs[job]
                        print('Finished processing {}.'.format(file))
            if _threadError:
                return 1
Example #24
def preprocess_xml(dir):
    global inline_namespaces
    global implementation_headers

    if not inline_namespaces and not implementation_headers:
        return

    write_xml_to_file = lambda x, f: x.write(
        f, encoding='utf-8', xml_declaration=True)
    inline_namespace_ids = [
        f'namespace{doxygen_mangle_name(ns)}' for ns in inline_namespaces
    ]
    implementation_header_data = [
        (hp, path.basename(hp), doxygen_mangle_name(path.basename(hp)),
         [(i, path.basename(i), doxygen_mangle_name(path.basename(i)))
          for i in impl]) for hp, impl in implementation_headers
    ]
    implementation_header_mappings = dict()
    implementation_header_innernamespaces = dict()
    implementation_header_sectiondefs = dict()
    for hdata in implementation_header_data:
        implementation_header_innernamespaces[hdata[2]] = []
        implementation_header_sectiondefs[hdata[2]] = []
        for (ip, ifn, iid) in hdata[3]:
            implementation_header_mappings[iid] = hdata

    if 1:
        extracted_implementation = False
        xml_files = utils.get_all_files(dir, any=('*.xml'))
        for xml_file in xml_files:
            print(f'Pre-processing {xml_file}')
            xml = ET.parse(xml_file)

            root = xml.getroot().find('compounddef')
            if root is None:
                continue

            changed = False

            # namespaces
            if root.get("kind") == "namespace" and inline_namespaces:
                for nsid in inline_namespace_ids:
                    if root.get("id") == nsid:
                        root.set("inline", "yes")
                        changed = True
                        break

            # dirs
            if root.get("kind") == "dir" and implementation_headers:
                innerfiles = root.findall('innerfile')
                for innerfile in innerfiles:
                    if innerfile.get(
                            'refid') in implementation_header_mappings:
                        root.remove(innerfile)
                        changed = True

            # header files
            if root.get("kind") == "file" and implementation_headers:
                # remove junk not required by m.css
                for tag in ('includes', 'includedby', 'incdepgraph',
                            'invincdepgraph'):
                    tags = root.findall(tag)
                    if tags:
                        for t in tags:
                            root.remove(t)
                            changed = True

                # rip the good bits out of implementation headers
                if root.get("id") in implementation_header_mappings:
                    hid = implementation_header_mappings[root.get("id")][2]
                    innernamespaces = root.findall('innernamespace')
                    if innernamespaces:
                        implementation_header_innernamespaces[
                            hid] = implementation_header_innernamespaces[
                                hid] + innernamespaces
                        extracted_implementation = True
                        for tag in innernamespaces:
                            root.remove(tag)
                            changed = True
                    sectiondefs = root.findall('sectiondef')
                    if sectiondefs:
                        implementation_header_sectiondefs[
                            hid] = implementation_header_sectiondefs[
                                hid] + sectiondefs
                        extracted_implementation = True
                        for tag in sectiondefs:
                            root.remove(tag)
                            changed = True

            if changed:
                write_xml_to_file(xml, xml_file)

        # merge extracted implementations
        if extracted_implementation:
            for (hp, hfn, hid, impl) in implementation_header_data:
                xml_file = path.join(dir, f'{hid}.xml')
                print(f'Merging implementation nodes into {xml_file}')
                xml = ET.parse(xml_file)
                root = xml.getroot().find('compounddef')
                changed = False

                innernamespaces = root.findall('innernamespace')
                for new_tag in implementation_header_innernamespaces[hid]:
                    matched = False
                    for existing_tag in innernamespaces:
                        if existing_tag.get('refid') == new_tag.get('refid'):
                            matched = True
                            break
                    if not matched:
                        root.append(new_tag)
                        innernamespaces.append(new_tag)
                        changed = True

                sectiondefs = root.findall('sectiondef')
                for new_section in implementation_header_sectiondefs[hid]:
                    matched_section = False
                    for existing_section in sectiondefs:
                        if existing_section.get('kind') == new_section.get(
                                'kind'):
                            matched_section = True

                            memberdefs = existing_section.findall('memberdef')
                            new_memberdefs = new_section.findall('memberdef')
                            for new_memberdef in new_memberdefs:
                                matched = False
                                for existing_memberdef in memberdefs:
                                    if existing_memberdef.get(
                                            'id') == new_memberdef.get('id'):
                                        matched = True
                                        break

                                if not matched:
                                    new_section.remove(new_memberdef)
                                    existing_section.append(new_memberdef)
                                    memberdefs.append(new_memberdef)
                                    changed = True
                            break

                    if not matched_section:
                        root.append(new_section)
                        sectiondefs.append(new_section)
                        changed = True

                if changed:
                    write_xml_to_file(xml, xml_file)

    # delete the impl header xml files
    if 1 and implementation_headers:
        for hdata in implementation_header_data:
            for (ip, ifn, iid) in hdata[3]:
                xml_file = path.join(dir, f'{iid}.xml')
                if (path.exists(xml_file)):
                    print(f'Deleting {xml_file}')
                    os.remove(xml_file)

    # scan through the files and substitute impl header ids and paths as appropriate
    if 1 and implementation_headers:
        xml_files = utils.get_all_files(dir, any=('*.xml'))
        for xml_file in xml_files:
            print(f'Re-linking implementation headers in {xml_file}')
            xml = utils.read_all_text_from_file(xml_file)
            for (hp, hfn, hid, impl) in implementation_header_data:
                for (ip, ifn, iid) in impl:
                    #xml = xml.replace(f'refid="{iid}"',f'refid="{hid}"')
                    xml = xml.replace(f'compoundref="{iid}"',
                                      f'compoundref="{hid}"')
                    xml = xml.replace(ip, hp)
            with open(xml_file, 'w', encoding='utf-8', newline='\n') as f:
                f.write(xml)
Example #25
from utils import get_all_files

paths = get_all_files('A')
with open('dir.txt', 'w') as f:
    for i in paths:
        f.write(i + "\n")
Example #26
def regressor_training_main(folders,
                            val_folders,
                            load_model,
                            model_name,
                            time,
                            epochs,
                            batch_size,
                            opt,
                            learning_rate,
                            lropf,
                            sd,
                            es,
                            feature,
                            workers,
                            test_dirs,
                            intensity_cut,
                            leakage,
                            tb,
                            gpu_fraction=0.5,
                            emin=-100,
                            emax=100,
                            class_model='',
                            clr=False,
                            train_indexes=None,
                            valid_indexes=None,
                            clr_values=[5e-5, 5e-3, 4]):
    ###################################
    #TensorFlow wizardry for GPU dynamic memory allocation
    if 0 < gpu_fraction < 1:
        config = tf.ConfigProto()
        # Don't pre-allocate memory; allocate as-needed
        config.gpu_options.allow_growth = True
        # Only allow a fraction of the GPU memory to be allocated
        config.gpu_options.per_process_gpu_memory_fraction = gpu_fraction
        # Create a session with the above options specified.
        K.tensorflow_backend.set_session(tf.Session(config=config))
        ###################################

    # remove semaphore warnings
    os.environ["PYTHONWARNINGS"] = "ignore:semaphore_tracker:UserWarning"

    # avoid validation deadlock problem
    mp.set_start_method('spawn', force=True)

    # hard coded parameters
    shuffle = True
    channels = 1
    if time:
        channels = 2

    # early stopping
    md_es = 0.1  # min delta
    p_es = 50  # patience

    # cycle learning rate CLR
    base_lr = clr_values[0]
    max_lr = clr_values[1]
    step_size = clr_values[2]

    # sgd
    lr = 0.01  # lr
    decay = 1e-4  # decay
    momentum = 0.9  # momentum
    nesterov = True

    # adam
    a_lr = learning_rate
    a_beta_1 = 0.9
    a_beta_2 = 0.999
    a_epsilon = None
    a_decay = 0
    amsgrad = True

    # adabound
    ab_lr = 1e-03
    ab_final_lr = 0.1
    ab_gamma = 1e-03
    ab_weight_decay = 0
    amsbound = False

    # rmsprop
    r_lr = 0.01
    r_rho = 0.9
    r_epsilon = None
    r_decay = 0.0

    # reduce lr on plateau
    f_lrop = 0.1  # factor
    p_lrop = 15  # patience
    md_lrop = 0.005  # min delta
    cd_lrop = 5  # cool down
    mlr_lrop = a_lr / 100  # min lr

    # cuts
    # intensity_cut = 50
    leakage2_intensity_cut = leakage

    training_files = get_all_files(folders)
    validation_files = get_all_files(val_folders)

    # create a folder to keep model & results
    now = datetime.datetime.now()
    root_dir = now.strftime(model_name + '_' + feature + '_' +
                            '%Y-%m-%d_%H-%M')
    mkdir(root_dir)
    models_dir = join(root_dir, "models")
    mkdir(models_dir)

    # generators
    print('Building training generator...')
    training_generator = LSTGenerator(
        training_files,
        batch_size=batch_size,
        arrival_time=time,
        feature=feature,
        shuffle=shuffle,
        intensity=intensity_cut,
        leakage2_intensity=leakage2_intensity_cut,
        load_indexes=train_indexes)
    if train_indexes is None:
        train_idxs = training_generator.get_all_info()
        train_idxs.to_pickle(join(root_dir, "train_indexes.pkl"))

    if len(val_folders) > 0:
        print('Building validation generator...')
        validation_generator = LSTGenerator(
            validation_files,
            batch_size=batch_size,
            arrival_time=time,
            feature=feature,
            shuffle=False,
            intensity=intensity_cut,
            leakage2_intensity=leakage2_intensity_cut,
            load_indexes=valid_indexes)
        if valid_indexes is None:
            valid_idxs = validation_generator.get_all_info()
            valid_idxs.to_pickle(join(root_dir, "valid_indexes.pkl"))
    # class_weight = {0: 1., 1: train_protons/train_gammas}
    # print(class_weight)

    # get image size (rows and columns)
    img_rows = training_generator.img_rows
    img_cols = training_generator.img_cols

    hype_print = '\n' + '======================================HYPERPARAMETERS======================================'

    hype_print += '\n' + 'Image rows: ' + str(
        img_rows) + ' Image cols: ' + str(img_cols)
    hype_print += '\n' + 'Folders:' + str(folders)
    hype_print += '\n' + 'Model: ' + str(model_name)
    hype_print += '\n' + 'Use arrival time: ' + str(time)
    hype_print += '\n' + 'Epochs:' + str(epochs)
    hype_print += '\n' + 'Batch size: ' + str(batch_size)
    hype_print += '\n' + 'Optimizer: ' + str(opt)
    hype_print += '\n' + 'Feature: ' + str(feature)
    hype_print += '\n' + 'Validation: ' + str(val_folders)
    hype_print += '\n' + 'Test dirs: ' + str(test_dirs)

    hype_print += '\n' + 'intensity_cut: ' + str(intensity_cut)
    hype_print += '\n' + 'leakage2_intensity_cut: ' + str(
        leakage2_intensity_cut)

    if clr:
        hype_print += '\n' + '--- Cycle Learning Rate ---'
        hype_print += '\n' + 'Base LR: ' + str(base_lr)
        hype_print += '\n' + 'Max LR: ' + str(max_lr)
        hype_print += '\n' + 'Step size: ' + str(step_size) + ' (' + str(
            step_size * len(training_generator)) + ')'
    if es:
        hype_print += '\n' + '--- Early stopping ---'
        hype_print += '\n' + 'Min delta: ' + str(md_es)
        hype_print += '\n' + 'Patience: ' + str(p_es)
        hype_print += '\n' + '----------------------'
    if opt == 'sgd':
        hype_print += '\n' + '--- SGD ---'
        hype_print += '\n' + 'Learning rate:' + str(lr)
        hype_print += '\n' + 'Decay: ' + str(decay)
        hype_print += '\n' + 'Momentum: ' + str(momentum)
        hype_print += '\n' + 'Nesterov: ' + str(nesterov)
        hype_print += '\n' + '-----------'
    elif opt == 'adam':
        hype_print += '\n' + '--- ADAM ---'
        hype_print += '\n' + 'lr: ' + str(a_lr)
        hype_print += '\n' + 'beta_1: ' + str(a_beta_1)
        hype_print += '\n' + 'beta_2: ' + str(a_beta_2)
        hype_print += '\n' + 'epsilon: ' + str(a_epsilon)
        hype_print += '\n' + 'decay: ' + str(a_decay)
        hype_print += '\n' + 'Amsgrad: ' + str(amsgrad)
        hype_print += '\n' + '------------'
    elif opt == 'rmsprop':
        hype_print += '\n' + '--- RMSprop ---'
        hype_print += '\n' + 'lr: ' + str(r_lr)
        hype_print += '\n' + 'rho: ' + str(r_rho)
        hype_print += '\n' + 'epsilon: ' + str(r_epsilon)
        hype_print += '\n' + 'decay: ' + str(r_decay)
        hype_print += '\n' + '------------'
    if lropf:
        hype_print += '\n' + '--- Reduce lr on plateau ---'
        hype_print += '\n' + 'lr decrease factor: ' + str(f_lrop)
        hype_print += '\n' + 'Patience: ' + str(p_lrop)
        hype_print += '\n' + 'Min delta: ' + str(md_lrop)
        hype_print += '\n' + 'Cool down:' + str(cd_lrop)
        hype_print += '\n' + 'Min lr: ' + str(mlr_lrop)
        hype_print += '\n' + '----------------------------'
    if sd:
        hype_print += '\n' + '--- Step decay ---'

    hype_print += '\n' + 'Workers: ' + str(workers)
    hype_print += '\n' + 'Shuffle: ' + str(shuffle)

    hype_print += '\n' + 'Number of training batches: ' + str(
        len(training_generator))

    if len(val_folders) > 0:
        hype_print += '\n' + 'Number of validation batches: ' + str(
            len(validation_generator))

    outcomes = 1
    # loss = 'mean_absolute_percentage_error'
    loss = 'mean_absolute_error'
    # loss = 'mean_squared_error'
    if feature == 'direction':
        outcomes = 2
        # loss = 'mean_absolute_error'
        # loss = 'mean_squared_error'

    # keras.backend.set_image_data_format('channels_first')
    if load_model:
        model = keras.models.load_model(model_name)
        model_name = Path(model_name).name
    else:
        model, hype_print = regressor_selector(model_name, hype_print,
                                               channels, img_rows, img_cols,
                                               outcomes)

    hype_print += '\n' + '========================================================================================='

    # printing on screen hyperparameters
    print(hype_print)

    # writing hyperparameters on file
    f = open(root_dir + '/hyperparameters.txt', 'w')
    f.write(hype_print)
    f.close()

    model.summary()

    callbacks = []

    if len(val_folders) > 0:
        checkpoint = ModelCheckpoint(
            filepath=models_dir + '/' + model_name +
            '_{epoch:02d}_{loss:.5f}_{val_loss:.5f}.h5',
            monitor='val_loss',
            save_best_only=False)
    else:
        checkpoint = ModelCheckpoint(filepath=models_dir + '/' + model_name +
                                     '_{epoch:02d}_{loss:.5f}.h5',
                                     monitor='loss',
                                     save_best_only=True)

    callbacks.append(checkpoint)

    # tensorboard = keras.callbacks.TensorBoard(log_dir=root_dir + "/logs",
    #                                          histogram_freq=5,
    #                                          batch_size=batch_size,
    #                                          write_images=True,
    #                                          update_freq=batch_size * 100)

    history = LossHistoryR()

    csv_callback = keras.callbacks.CSVLogger(root_dir + '/epochs_log.csv',
                                             separator=',',
                                             append=False)

    callbacks.append(history)
    callbacks.append(csv_callback)

    # callbacks.append(tensorboard)

    # sgd
    optimizer = None
    if opt == 'sgd':
        sgd = optimizers.SGD(lr=lr,
                             decay=decay,
                             momentum=momentum,
                             nesterov=nesterov)
        optimizer = sgd
    elif opt == 'adam':
        adam = optimizers.Adam(lr=a_lr,
                               beta_1=a_beta_1,
                               beta_2=a_beta_2,
                               epsilon=a_epsilon,
                               decay=a_decay,
                               amsgrad=amsgrad)
        optimizer = adam


#    elif opt == 'adabound':
#        adabound = AdaBound(lr=ab_lr, final_lr=ab_final_lr, gamma=ab_gamma, weight_decay=ab_weight_decay,
#                            amsbound=False)
#        optimizer = adabound
    elif opt == 'rmsprop':
        rmsprop = optimizers.RMSprop(lr=r_lr,
                                     rho=r_rho,
                                     epsilon=r_epsilon,
                                     decay=r_decay)
        optimizer = rmsprop

    # reduce lr on plateau
    if lropf:
        lrop = keras.callbacks.ReduceLROnPlateau(monitor='val_loss',
                                                 factor=f_lrop,
                                                 patience=p_lrop,
                                                 verbose=1,
                                                 mode='auto',
                                                 min_delta=md_lrop,
                                                 cooldown=cd_lrop,
                                                 min_lr=mlr_lrop)
        callbacks.append(lrop)

    if sd:

        # learning rate schedule
        def step_decay(epoch):
            current = K.eval(model.optimizer.lr)
            lrate = current
            if epoch == 99:
                lrate = current / 10
                print('Reduced learning rate by a factor 10')
            return lrate

        stepd = LearningRateScheduler(step_decay)
        callbacks.append(stepd)

    if es:
        # early stopping
        early_stopping = EarlyStopping(monitor='val_loss',
                                       min_delta=md_es,
                                       patience=p_es,
                                       verbose=1,
                                       mode='min')
        callbacks.append(early_stopping)

    if tb:
        tb_path = os.path.join(root_dir, 'tb')
        if not os.path.exists(tb_path):
            os.mkdir(tb_path)
        #tb_path = os.path.join(tb_path, root_dir)
        # os.mkdir(tb_path)
        tensorboard = TensorBoard(log_dir=tb_path)
        callbacks.append(tensorboard)
    '''
    findlr=False
    if findlr:
        lr_callback = LRFinder(len(training_generator)*batch_size, batch_size,
                               1e-05, 1e01,
                               # validation_data=(X_val, Y_val),
                               lr_scale='exp', save_dir=join(root_dir,'clr'))
        callbacks.append(lr_callback)
    
    oclr=True  # FIX THIS MESS OR DELETE IT
    if oclr:
        lr_manager = OneCycleLR(
            max_lr=0.001,
            maximum_momentum=0.9,
            minimum_momentum=None,
            verbose=True
        )
        callbacks.append(lr_manager)
    
    oclr=True
    if oclr:
        ocp = ktools.one_cycle.OneCycle(
            lr_range=(1e-5, 1e-3),
            momentum_range=(0.95, 0.85),
            reset_on_train_begin=True,
            record_frq=10
        )
        callbacks.append(ocp)
    '''

    if clr:
        cyclelr = CyclicLR(
            base_lr=base_lr,
            max_lr=max_lr,
            step_size=step_size * len(training_generator),
            #mode='exp_range' # uncomment to activate exp mode, instead of traingular
        )
        callbacks.append(cyclelr)

    model.compile(optimizer=optimizer, loss=loss)

    if len(val_folders) > 0:
        model.fit(x=training_generator,
                  validation_data=validation_generator,
                  steps_per_epoch=len(training_generator),
                  validation_steps=len(validation_generator),
                  epochs=epochs,
                  verbose=1,
                  max_queue_size=10,
                  use_multiprocessing=True,
                  workers=workers,
                  shuffle=False,
                  callbacks=callbacks)
    else:
        model.fit(x=training_generator,
                  steps_per_epoch=len(training_generator),
                  epochs=epochs,
                  verbose=1,
                  max_queue_size=10,
                  use_multiprocessing=True,
                  workers=workers,
                  shuffle=False,
                  callbacks=callbacks)

    # mp.set_start_method('fork')

    # save results
    train_history = root_dir + '/train-history'
    with open(train_history, 'wb') as file_pi:
        pickle.dump(history.dic, file_pi)

    # post training operations

    # training plots
    train_plots(train_history, False)

    if len(test_dirs) > 0:

        if len(val_folders) > 0:
            # get the best model on validation
            val_loss = history.dic['val_losses']
            m = val_loss.index(
                min(val_loss))  # get the epoch index with the lowest validation loss

            model_checkpoints = [
                join(models_dir, f) for f in listdir(models_dir)
                if (isfile(join(models_dir, f)) and f.startswith(
                    model_name + '_' + '{:02d}'.format(m + 1)))
            ]

            best = model_checkpoints[0]

            print('Best checkpoint: ', best)

        else:
            # get the best model on training loss
            losses = history.dic['losses']
            m = losses.index(min(losses))  # get the epoch index with the lowest training loss

            model_checkpoints = [
                join(models_dir, f) for f in listdir(models_dir)
                if (isfile(join(models_dir, f)) and f.startswith(
                    model_name + '_' + '{:02d}'.format(m + 1)))
            ]

            best = model_checkpoints[0]

            print('Best checkpoint: ', best)

        # test plots & results if test data is provided
        if len(test_dirs) > 0:
            pkl = tester(test_dirs, best, batch_size, time, feature, workers)
            test_plots(pkl, feature)
Example #27
    trainlist = datacfg_options['train']
    nsamples = utils.read_train_list(trainlist)
    gpus = datacfg_options['gpus']
    num_workers = int(datacfg_options['num_workers'])
    backupdir = datacfg_options['backup']
    if not os.path.exists(backupdir):
        os.makedirs(backupdir)
    batch_size = int(netcfg_options['batch'])
    max_batches = int(netcfg_options['max_batches'])
    learning_rate = float(netcfg_options['learning_rate'])
    momentum = float(netcfg_options['momentum'])
    decay = float(netcfg_options['decay'])
    steps = [float(step) for step in netcfg_options['steps'].split(',')]
    scales = [float(scale) for scale in netcfg_options['scales'].split(',')]
    bg_file_names = utils.get_all_files('./VOCdevkit/VOC2012/JPEGImages')

    # Train parameters
    max_epochs = 700  # max_batches*batch_size/nsamples+1
    use_cuda = True
    seed = int(time.time())
    eps = 1e-5
    save_interval = 10  # epoches
    dot_interval = 70  # batches
    best_acc = -1

    # Test parameters
    conf_thresh = 0.05
    nms_thresh = 0.4
    match_thresh = 0.5
    iou_thresh = 0.5
Example #28
def tester(folders,
           model,
           batch_size,
           time,
           feature,
           workers,
           intensity_cut,
           leakage,
           gpu_fraction=1,
           e_model=None,
           test_indexes=None):

    if 0 >= gpu_fraction or gpu_fraction > 1:
        pass
    ###################################
    #TensorFlow wizardry for GPU dynamic memory allocation
    else:
        config = tf.ConfigProto()
        # Don't pre-allocate memory; allocate as-needed
        config.gpu_options.allow_growth = True
        # Only allow a fraction of the GPU memory to be allocated
        config.gpu_options.per_process_gpu_memory_fraction = gpu_fraction
        # Create a session with the above options specified.
        keras.backend.tensorflow_backend.set_session(tf.Session(config=config))
        ###################################

    all_files = utils.get_all_files(folders)

    print('Building test generator...')
    plike = False
    if feature == 'direction':
        plike = True

    test_generator = LSTGenerator(h5files=all_files,
                                  feature=feature,
                                  batch_size=batch_size,
                                  arrival_time=time,
                                  shuffle=False,
                                  intensity=intensity_cut,
                                  leakage2_intensity=leakage,
                                  plike=plike,
                                  load_indexes=test_indexes)
    test_idxs = test_generator.get_all_info()
    if test_indexes is None:
        test_idxs.to_pickle("./test_indexes.pkl")
        pd.DataFrame(all_files).to_csv("./test_files.csv")
    #if feature == 'gammaness':
    #    test_gammas = np.unique(test_idxs['class'], return_counts=True)[1][1]
    #    test_protons = np.unique(test_idxs['class'], return_counts=True)[1][0]

    #    test_gamma_frac = test_generator.gamma_fraction()
    #    print('Summary: \nTest gammas: {} \nTest protons: {}, \nGamma fraction: {}'.format(test_gammas, test_protons,test_gamma_frac))

    print('Number of test batches: ' + str(len(test_generator)))
    print('Loading model...')
    mdl = keras.models.load_model(model)
    print('Predicting {}...'.format(feature))
    predictions = mdl.predict_generator(generator=test_generator,
                                        max_queue_size=10,
                                        workers=workers,
                                        use_multiprocessing=True,
                                        verbose=1)

    # adjust length of indexes

    print('Retrieving true {}...'.format(feature))
    df = pd.DataFrame()
    df['energy_true'] = test_generator.get_energies(log=True)
    if feature != 'energy' and e_model is not None:
        print('Predicting energy...')
        e_mdl = keras.models.load_model(e_model)
        e_predictions = e_mdl.predict_generator(generator=test_generator,
                                                max_queue_size=10,
                                                workers=workers,
                                                use_multiprocessing=True,
                                                verbose=1)
        df['energy_reco'] = e_predictions

    if feature == 'gammaness':
        df['class'] = test_generator.get_class()
        df['gammaness'] = predictions
    elif feature == 'energy':
        df['energy_reco'] = predictions
    elif feature == 'direction':
        df['d_alt_true'] = test_generator.get_AltAz(nom_frame=True)[:, 0]
        df['d_az_true'] = test_generator.get_AltAz(nom_frame=True)[:, 1]
        df['d_alt_reco'] = predictions[:, 0]
        df['d_az_reco'] = predictions[:, 1]

    res_file = model + '_' + feature + '_test.pkl'
    df.to_pickle(res_file)
    print('Results saved in ' + res_file)
    #print('Summary: \nTest gammas: {} \nTest protons: {}, \nGamma fraction: {}'.format(test_gammas, test_protons, test_gamma_frac))
    return df
Example #29
# converts all video frame rates to a specified rate
import utils
import subprocess
import os

all_vids = utils.get_all_files("./data/raw", "mp4")

for v in all_vids:
    directory = os.path.split(v)[0]
    directory = f'{os.path.split(v)[0]}/video/'

    if not os.path.exists(directory):
        print(f'making directory: {directory}')
        os.makedirs(directory)

    filename = os.path.split(v)[-1]
    # filename = filename[:-4]
    # filename = f"{filename}.wav"
    output_file = f'{directory}{filename}'

    result = subprocess.Popen(["ffmpeg", "-i", v, "-filter:v", "fps=30", output_file],stdout=subprocess.PIPE)
    result.wait()
Example #30
def build_native_dir(directory, output_dir, cache_dir, abis, std_includes_path, rules: BaseConfig):
	executables = {}
	for abi in abis:
		executable = prepare_compiler_executable(abi)
		if executable is None:
			print("failed to acquire GCC executable from NDK for abi " + abi)
			return CODE_FAILED_NO_GCC
		executables[abi] = executable

	try:
		manifest = get_manifest(directory)
		targets = {}
		soname = "lib" + manifest["shared"]["name"] + ".so"
		for abi in abis:
			targets[abi] = os.path.join(output_dir, "so/" + abi + "/" + soname)
	except Exception as err:
		print("failed to read manifest for directory " + directory + " error: " + str(err))
		return CODE_FAILED_INVALID_MANIFEST

	keep_sources = rules.get_value("keepSources", fallback=False)
	if keep_sources:
		# copy everything and clear build files
		copy_directory(directory, output_dir, clear_dst=True)
		clear_directory(os.path.join(output_dir, "so"))
		os.remove(os.path.join(output_dir, soname))
	else:
		clear_directory(output_dir)

		# copy manifest
		copy_file(os.path.join(directory, "manifest"), os.path.join(output_dir, "manifest"))

		# copy includes
		keep_includes = rules.get_value("keepIncludes", fallback=True)
		for include_path in manifest["shared"]["include"]:
			src_include_path = os.path.join(directory, include_path)
			output_include_path = os.path.join(output_dir, include_path)
			if keep_includes:
				copy_directory(src_include_path, output_include_path, clear_dst=True)
			else:
				clear_directory(output_include_path)

	std_includes = []
	for std_includes_dir in os.listdir(std_includes_path):
		std_includes.append(os.path.abspath(os.path.join(std_includes_path, std_includes_dir)))

	# compile for every abi
	overall_result = CODE_OK
	for abi in abis:
		printed_compilation_title = f"compiling {os.path.basename(directory)} for {abi}"
		print("\n")
		print(f"{'=' * (48 - len(printed_compilation_title) // 2)} {printed_compilation_title} {'=' * (48 - (1 + len(printed_compilation_title)) // 2)}")

		executable = executables[abi]
		gcc = [executable, "-std=c++11"]
		includes = []
		for std_includes_dir in std_includes:
			includes.append(f'-I{std_includes_dir}')
		dependencies = [f'-L{get_fake_so_dir(abi)}', "-landroid", "-lm", "-llog"]
		for link in rules.get_value("link", fallback=[]) + make_config.get_value("make.linkNative", fallback=[]) + ["horizon"]:
			add_fake_so(executable, abi, link)
			dependencies.append(f'-l{link}')
		if "depends" in manifest:
			search_dir = os.path.abspath(os.path.join(directory, ".."))  # always search for dependencies in current dir
			for dependency in manifest["depends"]:
				if dependency is not None:
					add_fake_so(executable, abi, dependency)
					dependencies.append("-l" + dependency)
					dependency_dir = search_directory(search_dir, dependency)
					if dependency_dir is not None:
						try:
							for include_dir in get_manifest(dependency_dir)["shared"]["include"]:
								includes.append("-I" + os.path.join(dependency_dir, include_dir))
						except KeyError:
							pass
					else:
						print(f"ERROR: dependency directory {dependency} is not found, it will be skipped")

		# prepare directories
		source_files = get_all_files(directory, extensions=(".cpp", ".c"))
		preprocessed_dir = os.path.abspath(os.path.join(cache_dir, "preprocessed", abi))
		ensure_directory(preprocessed_dir)
		object_dir = os.path.abspath(os.path.join(cache_dir, "object", abi))
		ensure_directory(object_dir)

		# pre-process and compile changes
		import filecmp
		object_files = []
		recompiled_count = 0
		for file in source_files:
			relative_file = relative_path(directory, file)
			sys.stdout.write("preprocessing " + relative_file + " " * 64 + "\r")

			object_file = os.path.join(object_dir, relative_file) + ".o"
			preprocessed_file = os.path.join(preprocessed_dir, relative_file)
			tmp_preprocessed_file = preprocessed_file + ".tmp"
			ensure_file_dir(preprocessed_file)
			ensure_file_dir(object_file)
			object_files.append(object_file)

			result = subprocess.call(gcc + ["-E", file, "-o", tmp_preprocessed_file] + includes)
			if result == CODE_OK:
				if not os.path.isfile(preprocessed_file) or not os.path.isfile(object_file) or \
						not filecmp.cmp(preprocessed_file, tmp_preprocessed_file):
					if os.path.isfile(preprocessed_file):
						os.remove(preprocessed_file)
					os.rename(tmp_preprocessed_file, preprocessed_file)
					if os.path.isfile(object_file):
						os.remove(object_file)

					sys.stdout.write("compiling " + relative_file + " " * 64 + "\n")
					result = max(result, subprocess.call(gcc + ["-c", preprocessed_file, "-shared", "-o", object_file]))
					if result != CODE_OK:
						if os.path.isfile(object_file):
							os.remove(object_file)
						overall_result = result
					else:
						recompiled_count += 1
			else:
				if os.path.isfile(object_file):
					os.remove(object_file)
				overall_result = result

		print(" " * 128)
		if overall_result != CODE_OK:
			print("failed to compile", overall_result)
			return overall_result
		else:
			print(f"recompiled {recompiled_count}/{len(object_files)} files with result {overall_result}")

		ensure_file_dir(targets[abi])

		command = []
		command += gcc
		command += object_files
		command.append("-shared")
		command.append("-Wl,-soname=" + soname)
		command.append("-o")
		command.append(targets[abi])
		command += includes
		command += dependencies
		print("linking object files...")
		result = subprocess.call(command)
		if result == CODE_OK:
			print("build successful")
		else:
			print("linker failed with result code", result)
			overall_result = result
			return overall_result
	return overall_result
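The incremental rebuild above preprocesses each source file, compares the result against the cached copy with filecmp, and only recompiles when the preprocessed output changed or the object file is missing. A stripped-down sketch of that test (hypothetical helper name, same logic as the block above):

# Sketch of the preprocess-and-compare rebuild check used above (hypothetical helper).
import filecmp
import os

def needs_recompile(preprocessed_file: str, tmp_preprocessed_file: str, object_file: str) -> bool:
    # rebuild when there is no cached preprocessed source, no object file,
    # or the freshly preprocessed source differs from the cached copy
    return (not os.path.isfile(preprocessed_file)
            or not os.path.isfile(object_file)
            or not filecmp.cmp(preprocessed_file, tmp_preprocessed_file))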
Example n. 31
#!/usr/bin/env python

"""
Renders manual_tests output files
"""

import render
import utils

if __name__ == '__main__':
    filenames = utils.get_all_files('manual_tests_output', ['*' + render.INPUT_ARRAY_FORMAT])
    filenames.sort()

    for filename in filenames:
        try:
            tags = render.parse_tags(filename)
            # Pick the first component only
            if render.Y_TAG in tags or render.Z_TAG in tags:
                continue
            # Pick the first frame only
            if render.is_animation(tags) and '0000' not in tags:
                continue
            if render.POINT2_TAG in tags:
                render.render_point2(filename)
            elif render.POINT3_TAG in tags:
                render.render_point3(filename)
            elif render.LINE2_TAG in tags:
                render.render_line2(filename)
            elif render.LINE3_TAG in tags:
                render.render_line3(filename)
            elif render.GRID2_TAG in tags:
Example n. 32
            continue
    tweets = pd.DataFrame(tweets_data)
    return tweets


def result(total_pos, total_neu, total_neg):
    each_live_result = dict(num_pos=total_pos,
                            num_neu=total_neu,
                            num_neg=total_neg)
    return each_live_result


if __name__ == '__main__':
    tweets_data_path = 'tweets_stream/'
    out = {}
    files = get_all_files(Config.data_path + tweets_data_path,
                          extension='json')
    for file in files:
        tweets = load_live_tweets(file)
        cleaned_tweets = preprocess(tweets)

        total_pos, total_neu, total_neg = vader_sentiment(cleaned_tweets)
        time_stamp = cleaned_tweets.iat[
            1, 0]  # get the time stamp of each live stream file
        each_live_result = result(total_pos, total_neu, total_neg)
        out.update({time_stamp: each_live_result})

    save_to_disk(data=out,
                 path=Config.reports_path,
                 filename='live_sentiment_summary.json')
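For illustration only, the summary written by save_to_disk maps each stream file's timestamp to the counts built by result(); the values below are placeholders, not real data:

# Illustrative shape of live_sentiment_summary.json (placeholder values):
example_out = {
    "2021-01-01 00:00:00": {"num_pos": 0, "num_neu": 0, "num_neg": 0},
}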
Example n. 33
    chrome_path_mac = 'open -a /Applications/Google\ Chrome.app %s'
    chrome_path_windows = 'C:/Program Files (x86)/Google/Chrome/Application/chrome.exe %s'
    if platform.system() == 'Darwin':
        web_browser_path = chrome_path_mac
    elif platform.system() == 'Windows':
        web_browser_path = chrome_path_windows
    else:
        web_browser_path = None  # unsupported platform: avoid a NameError, skip browser auto-open

    #Running of Main Code Body with Warning Suppression
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        safe_mkdir(output_dir)
        safe_mkdir(output_dir_cluster)

        #JPEG Colour Converter
        print('WITHOUT CLUSTERING: Converting image files to ASCII HTML files now.')
        for file_name in get_all_files(input_dir, file_ext='.jpg'):
            img_obj = JPEGColourConverter(img_path=file_name, output_dir=output_dir, line_height=line_height, font_size=font_size, h_stretch=h_stretch, symbol=symbol, max_size=max_size, web_browser=web_browser_path, background_colour=background_colour)
            img_obj.convert_save_open(open=False, loading_name=img_obj.save_file_name)
        print('Conversion completed.')

        #JPEG Cluster Colour Converter
        print('WITH CLUSTERING: Converting image files to ASCII HTML files now.')
        for num_cluster in num_clusters:
            for file_name in get_all_files(input_dir, file_ext='.jpg'):
                img_obj = JPEGClusterColourConverter(img_path=file_name, output_dir=output_dir_cluster, line_height=line_height, font_size=font_size, h_stretch=h_stretch, symbol=symbol, max_size=max_size, web_browser=web_browser_path, background_colour=background_colour, num_clusters=num_cluster)
                img_obj.convert_save_open(open=False, loading_name=img_obj.save_file_name)
        print('Conversion completed.')
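The hard-coded Chrome paths above only cover macOS and Windows. As a sketch that is not part of the original script, the standard-library webbrowser module gives a portable fallback that opens whatever default browser the platform reports:

# Portable alternative to hard-coded browser paths (sketch only).
import os
import webbrowser

def open_in_browser(html_path: str) -> bool:
    # webbrowser.open returns True if a browser could be launched
    return webbrowser.open('file://' + os.path.abspath(html_path))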

            

Example n. 34
    maindir = sys.argv[1]
    output = sys.argv[2]

    # sanity checks
    if not os.path.isdir(maindir):
        print('ERROR: directory', maindir, 'does not exist.')
        sys.exit(0)
    if os.path.isfile(output):
        print('ERROR: file', output, 'exists, delete or provide a new filename.')
        sys.exit(0)

    # start time
    t1 = time.time()

    # get all h5 files
    allh5 = utils.get_all_files(maindir, ext='.h5')
    print('found', len(allh5), 'H5 files.')

    # create summary file
    HDF5.create_aggregate_file(output, expectedrows=len(allh5),
                               summaryfile=True)
    print('Summary file created, we start filling it.')

    # fill it
    h5 = HDF5.open_h5_file_append(output)
    HDF5.fill_hdf5_aggregate_file(h5, allh5, summaryfile=True)
    h5.close()

    # done!
    stimelength = str(datetime.timedelta(seconds=time.time() - t1))
    print('Summarized', len(allh5), 'files in:', stimelength)
Example n. 35
    # GET DIR/FILE
    if not os.path.exists(sys.argv[1]):
        print('file or dir:', sys.argv[1], 'does not exist.')
        sys.exit(0)
    if os.path.isfile(sys.argv[1]):
        if os.path.splitext(sys.argv[1])[1] != '.h5':
            print('we expect a .h5 extension for file:', sys.argv[1])
            sys.exit(0)
        allh5files = [os.path.abspath(sys.argv[1])]
    elif not os.path.isdir(sys.argv[1]):
        print(sys.argv[1], "is neither a file nor a directory? confused... a link? c'est klug?")
        sys.exit(0)
    else:
        allh5files = utils.get_all_files(sys.argv[1], ext='.h5')
    if len(allh5files) == 0:
        print('no .h5 file found, sorry, check the directory you gave us:', sys.argv[1])

    # final sanity checks
    for f in allh5files:
        assert os.path.splitext(f)[1] == '.h5', \
            'file with wrong extension? should have been caught earlier... file=' + f
    nFiles = len(allh5files)
    if nFiles > 1000:
        print('you are creating', nFiles, 'new matlab files, hope you have the space and time!')

    # let's go!
    cnt = 0
Example n. 36
    for i in chunked:
        if type(i) == Tree:
            # accumulate the tokens of the current named-entity subtree
            current_chunk.append(" ".join([token for token, pos in i.leaves()]))
        elif current_chunk:
            # a non-entity token closes the chunk being built
            named_entity = " ".join(current_chunk)
            if named_entity not in continuous_chunk:
                continuous_chunk.append(named_entity)
            current_chunk = []
    return continuous_chunk
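A rough usage note, assuming chunked is produced upstream by NLTK's ne_chunk(pos_tag(word_tokenize(text))) and the relevant NLTK models are installed; the exact output depends on those models:

# get_continuous_chunks("Barack Obama met Angela Merkel in Berlin.")
# -> something like ['Barack Obama', 'Angela Merkel', 'Berlin']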


if __name__ == "__main__":
    tweets_data_path = 'tweets_by_country/'
    all_files = get_all_files(Config.base_path + tweets_data_path, extension='csv')
    out = {}
    for country in Config.country_prefix:
        df = pd.DataFrame()
        news_files = list(filter(lambda x: country in x, all_files))
        for file in news_files:
            data = pd.read_csv(file, names=Config.colnames, usecols=Config.usecols_list)
            data.dropna(axis=0, how='any', inplace=True)
            df = pd.concat([df, data], ignore_index=True)  # DataFrame.append was removed in pandas 2.0
        text_df = filter_df(Config.keywords, df)
        text_list = df_to_str(text_df)
        text_str = '. '.join(text_list)
        namedEntities = get_continuous_chunks(text_str)
        out[country.replace('_', '')] = namedEntities
    save_to_disk(data=out,
                 path=Config.base_path + 'report_named_entities/',
Example n. 37
            ") && "
            "cd {0} && "
            "git add -A && "
            "git commit --amend -q -m Autogenerated && "
            "git push -f origin master:gh-pages"
        .format(doc_dir, branch),
        "deploy-doc",
        [doc_rule],
        phony=True,
        no_format=True)

mk.init_logger()

root = "src"

srcs = list(utils.get_all_files(root))

pp_rules = list(make_preprocess_rules(srcs))

rules, ppfiles = make_copy_header_rules(srcs, [".h", ".hpp"], prefix="calico/")
prepare_rule = mk.alias("prepare", ppfiles)

test_rules = list(make_test_rules(srcs, [".c", ".cpp"], root))

build_rule = mk.alias("build", rules + [prepare_rule])

build_bench_rule = mk.alias("build-bench", [x[2] for x in test_rules
                                            if x[2] is not None])

build_check_rule = mk.alias("build-check", [x[0] for x in test_rules
                                            if x[0] is not None])
Example n. 38
    maindir = sys.argv[1]
    output = sys.argv[2]

    # sanity checks
    if not os.path.isdir(maindir):
        print('ERROR: directory', maindir, 'does not exist.')
        sys.exit(0)
    if os.path.isfile(output):
        print('ERROR: file', output, 'exists, delete or provide a new filename.')
        sys.exit(0)

    # start time
    t1 = time.time()

    # get all h5 files
    allh5 = utils.get_all_files(maindir, ext='.h5')
    print('found', len(allh5), 'H5 files.')

    # create summary file
    HDF5.create_aggregate_file(output,
                               expectedrows=len(allh5),
                               summaryfile=True)
    print('Summary file created, we start filling it.')

    # fill it
    h5 = HDF5.open_h5_file_append(output)
    HDF5.fill_hdf5_aggregate_file(h5, allh5, summaryfile=True)
    h5.close()

    # done!
    stimelength = str(datetime.timedelta(seconds=time.time() - t1))
Example n. 39
    ES_HOST = sys.argv[3]
    start_time = time.time()
    logging.basicConfig(format='%(asctime)s %(message)s',
                        datefmt='%m/%d/%Y %I:%M:%S %p',
                        filename='./Logs/mterms_data_preparation.log',
                        level=logging.INFO)

    logger = logging.getLogger('LiteratureAnalysis')
    logger.info('Started')
    db_connection = elasticsearch_connection.ElasticsearchConnection(
        ES_HOST, ES_AUTH_USER, ES_AUTH_PASSWORD)
    elasticsearch_client = db_connection.get_elasticsearch_client()
    dir_path = r"D:/ASU_Part_time/LiteratureAnalysis/TermVectorJstorData/"
    extension = "json"
    all_json_files = utils.get_all_files(dir_path, extension)
    # first 24302 files, processed in reverse order (scan the directory only once)
    files_to_proceed = all_json_files[::-1][len(all_json_files) - 24302:]

    for file_number, file in enumerate(files_to_proceed):
        completed_files = open("./completed_files_list.csv", 'a')
        noncompleted_files = open("./noncompleted_files_list.csv", 'a')
        batch_start_time = time.time()
        try:
            print("Processing of {} file number {} Started ".format(
                file.split("\\")[-1], 24302 - file_number - 1))
            logging.info(
                "Processing of file number {} with filename {} started".format(
                    24302 - file_number - 1, file))
            get_term_vectors(file, elasticsearch_client,
                             24302 - file_number - 1)
            print("Processing of file number {} Completed".format(24302 -
Example n. 40
def render_line2(filename):
    if is_animation(parse_tags(filename)):
        data = []

        def get_line_data(filename, frame):
            filename = filename.replace('0000', '%04d' % frame)
            data_x = np.load(filename)
            data_y = np.load(filename.replace(',' + X_TAG, ',' + Y_TAG))
            return (data_x, data_y)

        def update_lines(frame, line):
            x, y = data[frame]
            line.set_data(x, y)
            return line,

        dirname = os.path.dirname(filename)
        basename = os.path.basename(filename).replace('0000',
                                                      '[0-9][0-9][0-9][0-9]')
        seq = utils.get_all_files(dirname, [basename])
        seq.sort()
        if len(seq) == 0:
            return

        data_x, data_y = get_line_data(seq[0], 0)
        has_bbox = False
        for frame in range(len(seq)):
            data_x, data_y = get_line_data(seq[frame], frame)
            if len(data_x) > 0 and len(data_y) > 0:
                if has_bbox:
                    xmin = min(xmin, data_x.min())
                    xmax = max(xmax, data_x.max())
                    ymin = min(ymin, data_y.min())
                    ymax = max(ymax, data_y.max())
                else:
                    xmin = data_x.min()
                    xmax = data_x.max()
                    ymin = data_y.min()
                    ymax = data_y.max()
                    has_bbox = True
            data.append((data_x, data_y))

        fig, ax = plt.subplots()
        x, y = get_line_data(filename, 0)

        if has_bbox:
            xmid = (xmax + xmin) / 2.0
            ymid = (ymax + ymin) / 2.0
            new_xmin = xmid - 1.25 * (xmid - xmin)
            new_xmax = xmid + 1.25 * (xmax - xmid)
            new_ymin = ymid - 1.25 * (ymid - ymin)
            new_ymax = ymid + 1.25 * (ymax - ymid)
            ax.set_xlim(new_xmin, new_xmax)
            ax.set_ylim(new_ymin, new_ymax)
        ax.set_aspect('equal')
        line, = ax.plot(x, y, lw=2, marker='o', markersize=3)
        anim = animation.FuncAnimation(fig,
                                       update_lines,
                                       len(seq),
                                       fargs=(line, ),
                                       interval=60,
                                       blit=False)
        output_filename = get_output_movie_filename(
            filename.replace(',' + X_TAG, ''))
        anim.save(output_filename,
                  fps=60,
                  bitrate=5000,
                  writer=video_writer,
                  extra_args=video_extra_args)
        plt.close(fig)
        print('Rendered <%s>' % output_filename)
    else:
        output_filename = get_output_bitmap_filename(
            filename.replace(',' + X_TAG, ''))
        render_still_line2(filename, filename.replace(',' + X_TAG,
                                                      ',' + Y_TAG),
                           output_filename)
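Saving the animation depends on an external movie writer (video_writer and video_extra_args are presumably defined elsewhere in the module). A small guard, added here as an assumption rather than part of the original, can fail fast when ffmpeg is missing:

# Optional guard: check that matplotlib can locate an ffmpeg writer before rendering.
from matplotlib import animation

if not animation.writers.is_available('ffmpeg'):
    raise RuntimeError('ffmpeg writer not available; install ffmpeg or choose another writer')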
Example n. 41
import os
import utils
from tag import Tag
from doc_loader import get_data

if __name__ == '__main__':
    from sys import argv, stderr
    if len(argv) < 3:  # both input_dir and output_dir are required
        print("Usage: {} input_dir output_dir".format(argv[0]), file=stderr)
        exit(1)

    # Load all python files in the directory (and subdirectories)
    output = argv[2]
    if not os.path.isdir(output):
        os.mkdir(output)
    files = utils.get_all_files(argv[1], '.py')

    Tag.load_tags('tags.yaml', True)

    # Convert all python files into AST objects
    classes = []
    functions = []
    for file in files:
        _ast = utils.load_ast(file)
        classes.extend(utils.get_classes(_ast))
        functions.extend(utils.get_functions(_ast))

    # Create the readme, then create all subsequent 
    data = get_data(classes, functions, output)
    readme_template = utils.load_template("../templates/readme.md", True)
    md = readme_template.render(data)
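The utils.load_ast, utils.get_classes and utils.get_functions helpers are not shown; a plausible sketch built on the standard ast module (an assumption, not the project's actual code):

# Hypothetical stand-ins for the helpers used above, based on the stdlib ast module.
import ast
from typing import List

def load_ast(path: str) -> ast.Module:
    with open(path, 'r', encoding='utf-8') as fh:
        return ast.parse(fh.read(), filename=path)

def get_classes(tree: ast.Module) -> List[ast.ClassDef]:
    return [node for node in ast.walk(tree) if isinstance(node, ast.ClassDef)]

def get_functions(tree: ast.Module) -> List[ast.FunctionDef]:
    return [node for node in ast.walk(tree) if isinstance(node, ast.FunctionDef)]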
Example n. 42
    _y = []

    for xyz in xyz_file:
        _x.append(float(xyz.split()[0]))
        _y.append(float(xyz.split()[1]))

    line.set_data(_x, _y)
    xyz_file.close()

    return line,


# Simulation Path
path = sys.argv[1]
output_name = sys.argv[2] if len(sys.argv) > 2 else 'result.mp4'
files = utils.get_all_files(path, "*.xyz")
size_files = len(files)

# Set up formatting for the movie files
Writer = animation.writers['ffmpeg']
writer = Writer(fps=25, bitrate=1800)

fig1 = plt.figure()

data = []
l, = plt.plot([], [], 'ro')

plt.xlim(0, 1)
plt.ylim(0, 1)
plt.xlabel('x')
plt.title('Simulation Result')