Example #1
def __init__(self):
    self.metadata = Metadata(True, 'lda')
    self.dataProvider = DataProvider()
    self.topicFile = 'tmp/topics.npy'
    self.kMeansFile = 'tmp/kmeans.pkl.npy'
Example #2
log.debug("Input file: %s." % inputfile)
log.debug("Original name: %s." % original)
log.debug("IMDB ID: %s." % imdbid)

info = converter.isValidSource(inputfile)
if info:
    log.info("Processing %s." % inputfile)

    output = converter.process(inputfile, original=original, info=info)

    if output:
        # Tag with metadata
        try:
            tag = Metadata(MediaType.Movie,
                           imdbid=imdbid,
                           original=original,
                           language=settings.taglanguage)
            if settings.tagfile:
                log.info('Tagging file with TMDB ID %s.', tag.tmdbid)
                tag.setHD(output['x'], output['y'])
                tag.writeTags(output['output'], settings.artwork,
                              settings.thumbnail)
        except:
            log.exception("Unable to tag file")

        # QTFS
        if settings.relocate_moov:
            converter.QTFS(output['output'])

        # Copy to additional locations
        output_files = converter.replicate(output['output'])
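Example #3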
# -*- coding: utf-8 -*-

from common import logo, genres, repo_base_url
from common import Track, get_files
from metadata import Metadata


print(logo)
meta = Metadata()
print("version " + meta.get_version() + "\n")


def add_source(m3u_file, track):
    prettified_title = track.title.replace("-", " ")
    prettified_title = prettified_title.replace(".m3u", "")
    source = "#EXTINF:" + str(track.length) + "," + prettified_title + "\n"
    source = source + track.path + "\n"
    m3u_file += source

    return m3u_file


for genre in genres:
    files = get_files(genre)
    m3u_file = "#EXTM3U\n"
    for file in files:
        track = Track(-1, file, (repo_base_url + "/" + genre + "/" + file))
        m3u_file = add_source(m3u_file, track)

    with open("../" + genre + "/" + "ALL_" + genre + ".m3u", "w") as playlist:
        playlist.write(m3u_file)
Example #4
def main():
    parser = argparse.ArgumentParser(
        description='Maven based download index generator')
    parser.add_argument(
        '--webout',
        dest='output_web',
        default='/out',
        help=
        'Base directory to output generated index pages. Will generate in sub-directories based on the maven path',
        type=parse_path)
    parser.add_argument(
        '--metaout',
        dest='output_meta',
        default='/out',
        help=
        'Base directory to output generated metadata. Will generate in sub-directories based on the maven path',
        type=parse_path)
    parser.add_argument('--downloadroot',
                        dest='dlroot',
                        default='https://maven.minecraftforge.net/',
                        help='Root URL for downloading artifacts')
    parser.add_argument('--webroot',
                        dest='webroot',
                        default='https://files.minecraftforge.net',
                        help='Root URL for artifact pages')
    parser.add_argument(
        '--static',
        dest='static',
        default='https://files.minecraftforge.net/static/',
        help='Root URL for static assets used by the templates')

    parser.add_argument(
        '--folder',
        dest='folder',
        default='/in/repositories/releases/',
        help=
        'Root directory for the maven structure to read metadata from files',
        type=parse_path)
    parser.add_argument('--config',
                        dest='config',
                        default='/in/global_overrides.json',
                        help="Location of global_overrides.json file",
                        type=parse_path)
    parser.add_argument('--templates',
                        dest='templates',
                        default='templates',
                        type=parse_path,
                        help="Path to templates")

    commands = parser.add_subparsers(help='Command to perform',
                                     dest='command',
                                     required=True)

    gen_command = commands.add_parser('gen',
                                      help='Indexes generator subcommand')
    gen_command.add_argument('artifact',
                             help='Maven Artifact - net.minecraftforge:forge')

    index_command = commands.add_parser('index',
                                        help='Generate tracked project index')

    promote_command = commands.add_parser('promote', help='Promote subcommand')
    promote_command.add_argument(
        'artifact', help='Maven Artifact - net.minecraftforge:forge')
    promote_command.add_argument('version', help='Maven Version')
    promote_command.add_argument('type',
                                 choices=['latest', 'recommended'],
                                 help='Type of promotion')
    args = parser.parse_args()

    print('Page Generator:')
    print(f'PyVer:    {sys.version}')
    print(f'Folder:   {args.folder}')
    print(f'Config:   {args.config}')
    print(f'Web Out:  {args.output_web}')
    print(f'Meta Out: {args.output_meta}')
    print(f'WebRoot:  {args.webroot}')
    print(f'DLRoot:   {args.dlroot}')
    print(f'Static:   {args.static}')
    print(f'Templates:{args.templates}')
    print(f'Command:  {args.command}')
    print(f'Artifact: {args.artifact if "artifact" in args else None}')
    print(f'Version:  {args.version if "version" in args else None}')
    print(f'Type:     {args.type if "type" in args else None}')

    metadata = Metadata(args.folder, args.output_meta, args.output_web,
                        args.webroot, args.dlroot, args.static, args.config)
    artifact = Artifact.load_maven_xml(
        metadata, args.artifact) if 'artifact' in args else None
    templates = Templates(args.templates, args.static, args.webroot,
                          args.dlroot)

    for gen in Generators[args.command]:
        gen.generate(metadata, artifact, templates, args)
Example #5
from metadata import Metadata

metadata = Metadata('/tmp/retile', 'label')


def test__metadata():
    _metadata = {
        'name': 'name',
        'label': 'label',
        'provides_product_versions': [{
            'name': 'name'
        }],
        'releases': [],
        'property_blueprints': [],
        'job_types': []
    }

    metadata._mutate_metadata(_metadata)

    assert _metadata['name'] == 'name-label'
    assert _metadata['provides_product_versions'][0]['name'] == 'name-label'
    assert _metadata['label'] == 'label Label'


def test__releases():
    releases = [{
        'name': 'redis-enterprise',
        'file': 'foo-bar-baz-3243151.23431.31143.pivotal'
    }, {
        'name': 'not-redis-enterprise'
    }]
Example #6
test_proportion = 0.25
print('input_size', input_size)
print('record_size', reference_size)

# Path setting
data_path = '/vol/bitbucket/rh2515/MRI_Crohns'
label_path = '/vol/bitbucket/rh2515/MRI_Crohns/labels'
record_out_path = '/vol/bitbucket/rh2515/MRI_Crohns/tfrecords/ti_imb_generic'
record_suffix = 'axial_t2_only'

# Load data
abnormal_cases = list(range(70))
healthy_cases = list(range(100))
metadata = Metadata(data_path,
                    label_path,
                    abnormal_cases,
                    healthy_cases,
                    dataset_tag='')
# metadata = Metadata(data_path, label_path, abnormal_cases, healthy_cases, dataset_tag=' cropped')

print('Loading images...')
for patient in metadata.patients:
    print(f'Loading patient {patient.get_id()}')
    patient.load_image_data()

# Preprocess data
preprocessor = Preprocessor(constant_volume_size=reference_size)
metadata.patients = preprocessor.process(metadata.patients,
                                         ileum_crop=False,
                                         region_grow_crop=True,
                                         statistical_region_crop=True)
Example #7
def test_simple_tm(dut):
    """Testing the simple_tm module 
    """

    # start HW sim clock
    cocotb.fork(Clock(dut.axis_aclk, PERIOD).start())

    # Reset the DUT
    dut._log.debug("Resetting DUT")
    dut.axis_resetn <= 0
    yield ClockCycles(dut.axis_aclk, 10)
    dut.axis_resetn <= 1
    dut._log.debug("Out of reset")

    # wait for the pifo to finish resetting
    yield wait_pifo_busy(dut)

    yield ClockCycles(dut.axis_aclk, 100)
    dut.m_axis_tready <= 0

    # build the list of pkts and metadata to insert
    pkts_meta_in = []
    for i in range(NUM_PKTS):
        #        pkt_len = random.randint(50, 1000)
        # build a packet
        pkt = Ether(dst='aa:aa:aa:aa:aa:aa', src='bb:bb:bb:bb:bb:bb')
        pkt = pkt / ('\x11' * 18 + '\x22' * 32)

        rank = random.randint(0, 100)

        # build the metadata
        meta = Metadata(pkt_len=len(pkt),
                        src_port=0b00000001,
                        dst_port=0b00000100,
                        rank=rank)
        tuser = BinaryValue(bits=len(meta) * 8, bigEndian=False)
        tuser.set_buff(str(meta))

        pkts_meta_in.append((rank, pkt, tuser))

    ranks_in = [tup[0] for tup in pkts_meta_in]
    pkts_in = [tup[1] for tup in pkts_meta_in]
    meta_in = [tup[2] for tup in pkts_meta_in]

    # Attach an AXI4Stream Master to the input pkt interface
    pkt_master = AXI4StreamMaster(dut, 's_axis', dut.axis_aclk)
    pkt_in_stats = AXI4StreamStats(dut,
                                   's_axis',
                                   dut.axis_aclk,
                                   idle_timeout=IDLE_TIMEOUT)
    pkt_in_stats_thread = cocotb.fork(
        pkt_in_stats.record_n_delays(len(pkts_in)))

    # Send pkts and metadata in the HW sim
    yield pkt_master.write_pkts(pkts_in, meta_in)

    # wait for pifo to no longer be busy
    yield wait_pifo_busy(dut)
    # delay between writing pkts and reading them out
    yield ClockCycles(dut.axis_aclk, 25)
    # wait for the pifo to finish the final enqueue
    yield FallingEdge(dut.axis_aclk)
    while dut.simple_tm_inst.pifo_busy.value:
        yield RisingEdge(dut.axis_aclk)
        yield FallingEdge(dut.axis_aclk)
    yield RisingEdge(dut.axis_aclk)

    # Attach an AXI4StreamSlave to the output pkt interface
    pkt_slave = AXI4StreamSlave(dut,
                                'm_axis',
                                dut.axis_aclk,
                                idle_timeout=IDLE_TIMEOUT)
    pkt_out_stats = AXI4StreamStats(dut,
                                    'm_axis',
                                    dut.axis_aclk,
                                    idle_timeout=IDLE_TIMEOUT)
    pkt_out_stats_thread = cocotb.fork(
        pkt_out_stats.record_n_delays(len(pkts_in)))

    # Read pkts out
    yield pkt_slave.read_n_pkts(len(pkts_in))

    #    # wait for stats threads to finish
    #    yield pkt_in_stats_thread.join()
    #    yield pkt_out_stats_thread.join()

    sorted_pkts_meta = sorted(pkts_meta_in, key=lambda x: x[0])

    expected_ranks = [tup[0] for tup in sorted_pkts_meta]
    expected_pkts = [tup[1] for tup in sorted_pkts_meta]
    expected_meta = [tup[2] for tup in sorted_pkts_meta]

    pkts_out = pkt_slave.pkts
    meta_out = pkt_slave.metadata

    actual_ranks = [Metadata(m.get_buff()).rank for m in meta_out]

    print 'input ranks           = {}'.format(ranks_in)
    print 'expected output ranks = {}'.format(expected_ranks)
    print 'actual output ranks   = {}'.format(actual_ranks)
    print ''
    print 'pkt_in_delays = {}'.format(pkt_in_stats.delays)
    print 'pkt_out_delays = {}'.format(pkt_out_stats.delays)
    print '\tmax = {}'.format(max(pkt_out_stats.delays))
    print '\tavg = {}'.format(
        sum(pkt_out_stats.delays) / float(len(pkt_out_stats.delays)))
    print '\tmin = {}'.format(min(pkt_out_stats.delays))

    results = {}
    results['enq_delays'] = pkt_in_stats.delays
    results['deq_delays'] = pkt_out_stats.delays
    with open(RESULTS_FILE, 'w') as f:
        json.dump(results, f)

    error = False
    for (exp_pkt, pkt, exp_meta, meta, i) in zip(expected_pkts, pkts_out,
                                                 expected_meta, meta_out,
                                                 range(len(expected_pkts))):
        if str(exp_pkt) != str(pkt):
            print 'ERROR: exp_pkt != pkt_out for pkt {}'.format(i)
            error = True
        if exp_meta.get_buff() != meta.get_buff():
            print 'ERROR: exp_meta != meta_out for pkt {}'.format(i)
            exp_meta = Metadata(exp_meta.get_buff())
            meta = Metadata(meta.get_buff())
            print 'exp_meta = {}'.format(exp_meta.summary())
            print 'meta = {}'.format(meta.summary())
            error = True

    print "******************"
    print "Checking for duplicates:"
    print "******************"
    for r, i in zip(actual_ranks, range(len(actual_ranks))):
        try:
            ranks_in.remove(r)
        except ValueError as e:
            print 'ERROR: output rank ({}) not in input set'.format(r)
            print e
            error = True
    if len(ranks_in) > 0:
        print 'ERROR: not all ranks removed: {}'.format(ranks_in)
        error = True

    yield ClockCycles(dut.axis_aclk, 20)

    if error:
        print 'ERROR: Test Failed'
        raise (TestFailure)
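Example #8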
    def onPlayBackStarted(self):  # pylint: disable=invalid-name
        """Called when user starts playing a file"""
        self.path = getInfoLabel(self.path_infolabel)
        if self.path.startswith('plugin://plugin.video.vrt.nu/'):
            self.listen = True
        else:
            self.listen = False
            return

        log(3, '[PlayerInfo {id}] Event onPlayBackStarted', id=self.thread_id)

        # Set property to let wait_for_resumepoints function know that update resume is busy
        set_property('vrtnu_resumepoints', 'busy')

        # Update previous episode when using "Up Next"
        if self.path.startswith('plugin://plugin.video.vrt.nu/play/upnext'):
            self.push_position(position=self.last_pos, total=self.total)

        # Reset episode data
        self.asset_id = None
        self.title = None
        self.url = None
        self.whatson_id = None

        ep_id = play_url_to_id(self.path)

        # Avoid setting resumepoints for livestreams
        for channel in CHANNELS:
            if ep_id.get('video_id') and ep_id.get('video_id') == channel.get(
                    'live_stream_id'):
                log(3,
                    '[PlayerInfo {id}] Avoid setting resumepoints for livestream {video_id}',
                    id=self.thread_id,
                    video_id=ep_id.get('video_id'))
                self.listen = False

                # Reset vrtnu_resumepoints property before return
                set_property('vrtnu_resumepoints', None)
                return

        # Get episode data needed to update resumepoints from VRT NU Search API
        episode = self.apihelper.get_single_episode_data(
            video_id=ep_id.get('video_id'),
            whatson_id=ep_id.get('whatson_id'),
            video_url=ep_id.get('video_url'))

        # Avoid setting resumepoints without episode data
        if episode is None:
            # Reset vrtnu_resumepoints property before return
            set_property('vrtnu_resumepoints', None)
            return

        from metadata import Metadata
        self.asset_id = Metadata(None, None).get_asset_id(episode)
        self.title = episode.get('program')
        self.url = url_to_episode(episode.get('url', ''))
        self.whatson_id = episode.get(
            'whatsonId') or None  # Avoid empty string

        # Kodi 17 doesn't have onAVStarted
        if kodi_version_major() < 18:
            self.onAVStarted()
Example #9
    def get_mismatches(self, reference_metadata,
                       target_metadata,
                       traverse_sub_workflows=False):
        """
        Takes two metadata files (both belonging to a common
        workflow execution), iterates through the outputs of
        their task, downloads the objects if not already exist
        in the working directory, compares the corresponding
        files, and returns the files that do not match.
        """
        def record_compare_result(match, reference, target):
            if not match:
                if call not in mismatches:
                    mismatches[call] = []
                mismatches[call].append([reference, target])

        # First we define a method that takes a list
        # of a task outputs, and keeps only those that
        # are files and their extension match the
        # file types that we want to compare
        # (e.g., filter only VCF files).
        filter_method = FilterBasedOnExtensions(
            self.filetypes_to_compare.keys()).filter

        # Then we create two instances of the Metadata
        # class, one for each metadata file, and we
        # invoke the `get_outputs` method which traverses
        # the outputs of task, and returns those filtered
        # by the above-defined filter.
        ref_output_files = Metadata(reference_metadata).get_outputs(
            traverse_sub_workflows, filter_method)
        test_output_files = Metadata(target_metadata).get_outputs(
            traverse_sub_workflows, filter_method)

        mismatches = {}
        i = 0

        r_t = ref_output_files.keys() - test_output_files.keys()
        t_r = test_output_files.keys() - ref_output_files.keys()
        if r_t or t_r:
            print(f"\n{COLOR_BLINKING}WARNING!{COLOR_ENDC}")
            print(f"The reference and test metadata files differ "
                  f"in their outputs; "
                  f"{COLOR_ULINE}the differences will be skipped.{COLOR_ENDC}")
            if r_t:
                print(f"\t{len(r_t)}/{len(ref_output_files.keys())} "
                      f"outputs of the reference are not in the test:")
                for x in r_t:
                    print(f"\t\t- {x}")
            if t_r:
                print(f"\t{len(t_r)}/{len(test_output_files.keys())} "
                      f"outputs of the test are not in the reference:")
                for x in t_r:
                    print(f"\t\t- {x}")
            print("\n")

        for x in r_t:
            ref_output_files.pop(x)
        print(f"{COLOR_YELLOW}Comparing {len(ref_output_files)} "
              f"files that are common between reference and test "
              f"metadata files and their respective task is executed "
              f"successfully.{COLOR_ENDC}")
        for call, ref_outputs in ref_output_files.items():
            i += 1
            matched = True
            print(f"Comparing\t{i}/{len(ref_output_files)}\t{call} ... ", end="")
            for extension, objs in ref_outputs.items():
                if len(objs) != len(test_output_files[call][extension]):
                    record_compare_result(False, objs, test_output_files[call][extension])
                    matched = False
                    continue
                for idx, obj in enumerate(objs):
                    equals, x, y = \
                        self.filetypes_to_compare[extension].equals(
                            obj, test_output_files[call][extension][idx])
                    record_compare_result(equals, x, y)
                    if not equals:
                        matched = False
            if matched:
                print(f"{COLOR_GREEN}match{COLOR_ENDC}")
            else:
                print(f"{COLOR_RED}mismatch{COLOR_ENDC}")
        return mismatches
Example #10
def load_fnames(md_path, zindexes):
    md = Metadata(md_path)
    md.image_table = md.image_table[[
        True if (('hybe' in i) or ('nucstain' in i)) else False
        for i in md.image_table.acq
    ]]
    posnames = md.posnames
    # Speed up checking if a position is already fully done
    not_finished = []
    finished_pos = []
    for pos in posnames:
        try:
            processing = pickle.load(
                open(
                    os.path.join(md_path, 'codestacks', pos, 'processing.pkl'),
                    'rb'))
            if len(processing) == 18:  # Check to make sure position is finished
                finished_pos.append(pos)
            else:
                not_finished.append(pos)
        except:
            not_finished.append(pos)
    random_posnames = np.random.choice(not_finished,
                                       size=len(not_finished),
                                       replace=False)
    hybe_list = sorted([
        i.split('_')[0] for i in md.acqnames
        if ('hybe' in i) or ('nucstain' in i)
    ])
    if zindexes == -1:
        Input = [{
            'fname_dicts':
            md.stkread(Channel='DeepBlue',
                       Position=pos,
                       fnames_only=True,
                       groupby='acq',
                       hybe=hybe_list),
            'posnames':
            pos
        } for pos in random_posnames]
    else:
        Input = [{
            'fname_dicts':
            md.stkread(Channel='DeepBlue',
                       Position=pos,
                       fnames_only=True,
                       groupby='acq',
                       hybe=hybe_list,
                       Zindex=list(range(8, 25))),
            'posnames':
            pos
        } for pos in random_posnames]


#         Input = []
#         for pos in random_posnames:
#             pos_df = {}
#             for hybe in hybe_list:
#                 pos_df[hybe] = []
#             for z in range(1,zindexes):
#                 fnames = md.stkread(Channel='DeepBlue', Position=pos,
#                                     fnames_only=True, groupby='acq',
#                                     hybe=hybe_list,Zindex=z)
#                 for h,f in fnames.items():
#                     pos_df[h.split('_')[0]].append(f)
#             Input.append({'fname_dicts':pos_df,'posnames': pos})
    return Input
Example #11
def get_metadata(file_path):
    return Metadata(file_path)
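Example #12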
def test_pifo_pkt_storage(dut):
    """Testing pifo_pkt_storage module
    """

    # instantiate the interface to the simpy implementation
    env = simpy.Environment()
    ps_simpy = PS_simpy_iface(env)

    # start HW sim clock
    cocotb.fork(Clock(dut.axis_aclk, PERIOD).start())

    # Reset the DUT
    dut._log.debug("Resetting DUT")
    dut.axis_resetn <= 0
    dut.m_axis_pkt_tready <= 0
    yield ClockCycles(dut.axis_aclk, 10)
    dut.axis_resetn <= 1
    dut.m_axis_pkt_tready <= 1
    dut._log.debug("Out of reset")

    # initialize the read request pointers
    dut.s_axis_ptr_tvalid <= 0
    dut.s_axis_ptr_tdata <= 0
    dut.s_axis_ptr_tlast <= 0

    # Attach an AXI4Stream Master to the input pkt interface
    pkt_master = AXI4StreamMaster(dut, 's_axis_pkt', dut.axis_aclk)

    # Attach an AXI4StreamSlave to the output ptr interface
    ptr_slave = AXI4StreamSlave(dut, 'm_axis_ptr', dut.axis_aclk)

    hw_results = {}
    hw_results['out_ptrs'] = []

    # build the list of pkts and metadata to insert
    all_pkts = []
    all_meta = []
    for i in range(10):
        pkt_len = random.randint(50, 1000)
        # build a packet
        pkt = Ether(dst='aa:aa:aa:aa:aa:aa', src='bb:bb:bb:bb:bb:bb')
#        pkt = pkt / ('\x11'*18 + '\x22'*32 + '\x33'*32 + '\x44'*32 + '\x55'*16)
        pkt = pkt / ('\x11'*(pkt_len - 14))

    
        # build the metadata 
        meta = Metadata(pkt_len=len(pkt), src_port=0b00000001, dst_port=0b00000100)

        all_pkts.append(pkt)
        all_meta.append(meta)

    # send pkts / metadata and read resulting pointers
    for p, m in zip(all_pkts, all_meta):
        # Start reading for output ptrs
        ptr_slave_thread = cocotb.fork(ptr_slave.read())
    
        # send the pkts in the HW sim
        tuser = BinaryValue(bits=len(m)*8, bigEndian=False)
        tuser.set_buff(str(m))
        yield pkt_master.write_pkts([p], [tuser])
    
        # wait to finish reading pointers
        yield ptr_slave_thread.join()
   
    # ptr_slave.data is all of the ptr words that have been read so far
    hw_results['out_ptrs'] = ptr_slave.data

    # check results with simpy simulation
    env.process(ps_simpy.insert_pkts(all_pkts, all_meta, hw_results))
    env.run()

    if ps_simpy.test_failed:
        raise TestFailure('Test Failed')

    # pause between pkt insertions and removals
    yield ClockCycles(dut.axis_aclk, 10)

    # Attach an AXI Stream master to read request interface
    ptr_master = AXI4StreamMaster(dut, 's_axis_ptr', dut.axis_aclk)

    # Attach an AXI Stream slave to the output packet interface
    pkt_slave = AXI4StreamSlave(dut, 'm_axis_pkt', dut.axis_aclk) 

    # remove the inserted pkts 
    hw_results['out_pkts'] = []
    for ptrs in hw_results['out_ptrs']:
        # start reading for output pkts

        pkt_slave_thread = cocotb.fork(pkt_slave.read_pkt())
 
        # submit read request
        yield ptr_master.write([ptrs])

        # wait to finish reading pkt
        yield pkt_slave_thread.join()

        yield RisingEdge(dut.axis_aclk)

    hw_results['out_pkts'] = pkt_slave.pkts
    hw_results['out_meta'] = [Metadata(m.get_buff()) for m in pkt_slave.metadata]

    # verify that the removed pkts are the same as the ones that were inserted
    if len(all_pkts) != len(hw_results['out_pkts']):
        print 'ERROR: {} pkts inserted, {} pkts removed'.format(len(all_pkts), len(hw_results['out_pkts']))
        raise TestFailure('Test Failed')

    for (pkt_in, pkt_out, meta_in, meta_out, i) in zip(all_pkts, hw_results['out_pkts'], all_meta, hw_results['out_meta'], range(len(all_pkts))):
        if str(pkt_in) != str(pkt_out):
            print 'ERROR: pkt_in != pkt_out for pkt {}'.format(i)
            print 'len(pkt_in) = {}, pkt_in: {}'.format(len(pkt_in), pkt_in.summary())
            print 'len(pkt_out) = {}, pkt_out: {}'.format(len(pkt_out), pkt_out.summary())
#            raise TestFailure('Test Failed')
        if str(meta_in) != str(meta_out):
            print 'ERROR: meta_in != meta_out for pkt {}'.format(i)
            print '\tmeta_in = {}'.format(meta_in.summary())
            print '\tmeta_out = {}'.format(meta_out.summary())
#            raise TestFailure('Test Failed')


    yield ClockCycles(dut.axis_aclk, 20)
Example #13
def __init__(self):
    self.metadata = Metadata()
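Example #14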
def test_both_enqdeq(dut):
    """Testing the simple_tm module with a constant fill level
    """

    # start HW sim clock
    cocotb.fork(Clock(dut.axis_aclk, PERIOD).start())

    # Reset the DUT
    dut._log.debug("Resetting DUT")
    dut.axis_resetn <= 0
    yield ClockCycles(dut.axis_aclk, 10)
    dut.axis_resetn <= 1
    dut._log.debug("Out of reset")

    # wait for the pifo to finish resetting
    yield FallingEdge(dut.axis_aclk)
    while dut.simple_tm_inst.pifo_busy.value:
        yield RisingEdge(dut.axis_aclk)
        yield FallingEdge(dut.axis_aclk)
    yield RisingEdge(dut.axis_aclk)

    yield ClockCycles(dut.axis_aclk, 100)
    dut.m_axis_tready <= 0

    # build the list of pkts and metadata to insert
    pkts_meta_in = make_pkts_and_meta(FILL_LEVEL - 1)

    ranks_in = [tup[0] for tup in pkts_meta_in]
    pkts_in = [tup[1] for tup in pkts_meta_in]
    meta_in = [tup[2] for tup in pkts_meta_in]

    # Attach an AXI4Stream Master to the input pkt interface
    pkt_master = AXI4StreamMaster(dut, 's_axis', dut.axis_aclk)

    # Send pkts and metadata in the HW sim
    yield pkt_master.write_pkts(pkts_in, meta_in)

    # wait a few cycles before beginning measurements
    yield ClockCycles(dut.axis_aclk, 25)

    # Attach an AXI4StreamSlave to the output pkt interface
    pkt_slave = AXI4StreamSlave(dut,
                                'm_axis',
                                dut.axis_aclk,
                                idle_timeout=IDLE_TIMEOUT)

    # connect stats tools
    pkt_in_stats = AXI4StreamStats(dut,
                                   's_axis',
                                   dut.axis_aclk,
                                   idle_timeout=IDLE_TIMEOUT)
    pkt_out_stats = AXI4StreamStats(dut,
                                    'm_axis',
                                    dut.axis_aclk,
                                    idle_timeout=IDLE_TIMEOUT)

    expected_outputs = []
    enq_delays = []
    deq_delays = []
    for i in range(NUM_SAMPLES):
        data = make_pkts_and_meta(1)
        pkts_in = [tup[1] for tup in data]
        meta_in = [tup[2] for tup in data]
        # compute expected outputs
        expected_outputs.append(min(pkts_meta_in))
        pkts_meta_in.remove(min(pkts_meta_in))
        # The removal happens after the insertion
        pkts_meta_in += data
        #        # start recording stats
        #        pkt_in_stats_thread = cocotb.fork(pkt_in_stats.record_n_delays(1))
        #        pkt_out_stats_thread = cocotb.fork(pkt_out_stats.record_n_delays(1))

        # send in packet
        pkt_master_thread = cocotb.fork(pkt_master.write_pkts(
            pkts_in, meta_in))
        # Read out packet
        pkt_slave_thread = cocotb.fork(pkt_slave.read_n_pkts(1))
        yield pkt_master_thread.join()
        yield pkt_slave_thread.join()
        #        # record results
        #        enq_delays += pkt_in_stats.delays
        #        deq_delays += pkt_out_stats.delays
        # wait a few cycles between samples
        yield ClockCycles(dut.axis_aclk, 30)
        if pkt_slave.error:
            print "ERROR: pkt_slave timed out"
            break

    sorted_pkts_meta = sorted(pkts_meta_in, key=lambda x: x[0])

    expected_ranks = [tup[0] for tup in expected_outputs]
    expected_pkts = [tup[1] for tup in expected_outputs]
    expected_meta = [tup[2] for tup in expected_outputs]

    pkts_out = pkt_slave.pkts
    meta_out = pkt_slave.metadata

    actual_ranks = [Metadata(m.get_buff()).rank for m in meta_out]

    print 'input ranks           = {}'.format(ranks_in)
    print 'expected output ranks = {}'.format(expected_ranks)
    print 'actual output ranks   = {}'.format(actual_ranks)
    print ''
    print 'pkt_in_delays = {}'.format(enq_delays)
    print 'pkt_out_delays = {}'.format(deq_delays)

    results = {}
    results['enq_delays'] = enq_delays
    results['deq_delays'] = deq_delays
    with open(RESULTS_FILE, 'w') as f:
        json.dump(results, f)

    error = False
    for (exp_pkt, pkt, exp_meta, meta, i) in zip(expected_pkts, pkts_out,
                                                 expected_meta, meta_out,
                                                 range(len(expected_pkts))):
        if str(exp_pkt) != str(pkt):
            print 'ERROR: exp_pkt != pkt_out for pkt {}'.format(i)
            error = True
        if exp_meta.get_buff() != meta.get_buff():
            print 'ERROR: exp_meta != meta_out for pkt {}'.format(i)
            exp_meta = Metadata(exp_meta.get_buff())
            meta = Metadata(meta.get_buff())
            print 'exp_meta = {}'.format(exp_meta.summary())
            print 'meta = {}'.format(meta.summary())
            error = True

    yield ClockCycles(dut.axis_aclk, 20)

    if error:
        print 'ERROR: Test Failed'
        raise (TestFailure)
Example #15
 def __init__(self):
   """
   Attributes
   ----------
   metadata: dict
     presentation metadata; each element of the dictionary is a dict with ['value', 'user'] items: value contains
     the metadata value and user indicates if the value comes from the user (if True) or from defaults (if False).
   """
   self.reset()
   self.metadata = {'title': Metadata(name='title', value=''),
                    'subtitle': Metadata(name='subtitle', value=''),
                    'authors': Metadata(name='authors', value=[]),
                    'authors_short': Metadata(name='authors_short', value=[]),
                    'emails': Metadata(name='emails', value=[]),
                    'affiliations': Metadata(name='affiliations', value=[]),
                    'affiliations_short': Metadata(name='affiliations_short', value=[]),
                    'logo': Metadata(name='logo', value=''),
                    'timer': Metadata(name='timer', value=''),
                    'location': Metadata(name='location', value=''),
                    'location_short': Metadata(name='location_short', value=''),
                    'date': Metadata(name='date', value=''),
                    'conference': Metadata(name='conference', value=''),
                    'conference_short': Metadata(name='conference_short', value=''),
                    'session': Metadata(name='session', value=''),
                    'session_short': Metadata(name='session_short', value=''),
                    'max_time': Metadata(name='max_time', value='25'),
                    'total_slides_number': Metadata(name='total_slides_number', value=''),
                    'dirs_to_copy': Metadata(name='dirs_to_copy', value=[]),
                    'toc': Metadata(name='toc', value=OrderedDict()),
                    'toc_depth': Metadata(name='toc_depth', value='2'),
                    'chaptertitle': Metadata(name='chaptertitle', value=''),
                    'chapternumber': Metadata(name='chapternumber', value=''),
                    'sectiontitle': Metadata(name='sectiontitle', value=''),
                    'sectionnumber': Metadata(name='sectionnumber', value=''),
                    'subsectiontitle': Metadata(name='subsectiontitle', value=''),
                    'subsectionnumber': Metadata(name='subsectionnumber', value=''),
                    'slidetitle': Metadata(name='slidetitle', value=''),
                    'slidenumber': Metadata(name='slidenumber', value=''),
                    'css_overtheme': Metadata(name='css_overtheme', value=[]),
                    'custom': Metadata(name='custom-[0-9]*', value='')}
   self.theme = Theme()
   self.parser = Parser()
   self.chapters = []
   self.position = Position()
   return
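Example #16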
from file_manager import FileManager
from worker import Worker
from notifier import Notifier


def root_configurer(log_queue, level):
    h = handlers.QueueHandler(log_queue)
    root = logging.getLogger()
    root.addHandler(h)
    root.setLevel(level)


if __name__ == "__main__":
    processes = list()
    config = Config()
    metadata = Metadata(config.metadata_file)
    log_queue = multiprocessing.Queue(-1)
    listener = Listener(log_queue)
    #    listener.start()
    root_configurer(log_queue, config.logger_level)
    logger = logging.getLogger(__name__)

    pending_tasks = multiprocessing.Queue(-1)
    done_tasks = multiprocessing.Queue(-1)
    error_tasks = multiprocessing.Queue(-1)
    slack_queue = multiprocessing.Queue(-1)

    processes.append(
        FileManager(log_queue, pending_tasks, done_tasks, error_tasks,
                    slack_queue))
    processes.append(Notifier(log_queue, slack_queue))
Example #17
    def run_modules(self):
        ag = int(self.gv.settings.args['--aggression'])
        self.debug.print_debug(
            self, u'Running at aggression level {0} {1}'.format(
                ag, "[grrr!]" if ag == 10 else ""))

        if ag > 10:
            self.debug.print_debug(
                self,
                "WARNING: safety bail-out features are disabled at aggression level 11"
            )

        if self.args['bibscan']:

            BibliographyDatabase(self.gv).scan()
        else:
            # check for stylesheets
            self.gv.check_file_exists(self.gv.docx_style_sheet_dir)
            # metadata file
            self.gv.metadata_file = self.set_metadata_file()

            self.gv.mk_dir(self.gv.output_folder_path)

            if self.args['doc']:
                # run doc to docx conversion
                # then run docx to tei
                UnoconvToDocx(self.gv).run('doc')
                DocxToTei(self.gv).run(True, self.args['--proprietary'])
            elif self.args['odt']:
                # run odt to docx conversion
                # then run docx to tei
                UnoconvToDocx(self.gv).run('odt')
                DocxToTei(self.gv).run(True, self.args['--proprietary'])
            elif self.args['other']:
                # run other unoconv-supported format to docx conversion
                # then run docx to tei
                UnoconvToDocx(self.gv).run('unoconv')
                DocxToTei(self.gv).run(True, self.args['--proprietary'])
            elif self.args['docx']:
                # run docx to tei conversion
                # includes hooks for proprietary transforms if enabled
                DocxToTei(self.gv).run(True, self.args['--proprietary'])
            elif self.args['docxextracted']:
                self.debug.print_debug(self, u'Skipping docx extraction')
                DocxToTei(self.gv).run(False, self.args['--proprietary'])
            elif self.args['tei']:
                self.debug.print_debug(
                    self, u'Skipping docx extraction; processing TEI file')
                DocxToTei(self.gv).run(False,
                                       self.args['--proprietary'],
                                       tei=True)

            if self.args['--puretei']:
                self.debug.print_debug(self,
                                       u'Exiting as TEI transform complete')
                return

            metadata = Metadata(self.gv)
            metadata.pre_clean()

            # run size classifier
            # aggression 5
            SizeClassifier(self.gv).run()

            # run bibliographic addins handler
            # aggression 4
            found_bibliography = BibliographyAddins(self.gv).run()

            # run list classifier
            # aggression 4
            ListClassifier(self.gv).run()

            bibliography_classifier = BibliographyClassifier(self.gv)

            if not found_bibliography:
                # run bibliographic classifier
                # aggression 4
                bibliography_classifier.run()

            # tei
            # aggression 3
            TeiManipulate(self.gv).run()

            # run tei to nlm conversion
            TeiToNlm(self.gv).run(not found_bibliography)

            if self.gv.settings.args['--purenlm']:
                self.debug.print_debug(self,
                                       u'Exiting as NLM transform complete')
                return

            manipulate = NlmManipulate(self.gv)

            if not self.gv.used_list_method:
                manipulate.fuse_references()

            # run reference linker
            if not (self.args['--nolink']):
                rl = ReferenceLinker(self.gv)
                rl.run(self.args['--interactive'])
                rl.cleanup()

            # run table classifier
            cc = CaptionClassifier(self.gv)
            if int(self.args['--aggression']) > int(
                    self.gv.settings.get_setting(
                        'tablecaptions', self, domain='aggression')):
                cc.run_tables()

            if int(self.args['--aggression']) > int(
                    self.gv.settings.get_setting(
                        'graphiccaptions', self, domain='aggression')):
                cc.run_graphics()

            # run metadata merge
            metadata.run()

            if self.args['--interactive']:
                bibliography_classifier.run_prompt(True)

            # process any bibliography entries that are possible
            BibliographyDatabase(self.gv).run()

            # remove stranded titles and cleanup
            manipulate.final_clean()

            if self.args['--identifiers']:
                IdGenerator(self.gv).run()

            if self.args['--chain']:
                # construct and run an XSLT chainer
                XslChain(self.gv).run()

            if self.args['--clean']:
                ComplianceEnforcer(self.gv).run()
Example #18
def __init__(self, _favorites, _resumepoints):
    """Constructor for the ApiHelper class"""
    self._favorites = _favorites
    self._resumepoints = _resumepoints
    self._metadata = Metadata(_favorites, _resumepoints)
Example #19
    if args.md_path == 'None':
        deconvolved_path = os.path.join(args.analysis_path, 'deconvolved')
        if not os.path.exists(deconvolved_path):
            print("analysis path doesn't have your metadata")
            print('add path to metadata after analysis path')
    else:
        deconvolved_path = args.md_path
    bead_path = os.path.join(args.analysis_path, 'beads')
    if not os.path.exists(bead_path):
        os.makedirs(bead_path)
    results_path = os.path.join(args.analysis_path, 'results')
    if not os.path.exists(results_path):
        os.makedirs(results_path)

    # Retrieving file names and position names
    md = Metadata(deconvolved_path)
    posnames = md.image_table.Position.unique()
    posnames = [pos for pos in posnames if "Pos" in pos]
    hybe_list = [
        'hybe1', 'hybe2', 'hybe3', 'hybe4', 'hybe5', 'hybe6', 'hybe7', 'hybe8',
        'hybe9'
    ]
    fnames_dicts = [
        md.stkread(Channel='DeepBlue',
                   Position=pos,
                   fnames_only=True,
                   groupby='acq',
                   hybe=hybe_list) for pos in posnames
    ]
    # Should move this to a better place; probably just replace pyspots Ave_Bead and import it
    Ave_Bead = pickle.load(
Example #20
if __name__ == '__main__':
    os.environ['MKL_NUM_THREADS'] = '4'
    os.environ['GOTO_NUM_THREADS'] = '4'
    os.environ['OMP_NUM_THREADS'] = '4'
    print(args)
    # Assuming these get imported during call below:
    # 1. bitmap
    # 2. bids, blanks, gids, cwords, gene_codeword_vectors, blank_codeword_vectors
    # 3. norm_gene_codeword_vectors, norm_blank_codeword_vectors
    cstk_path = args.cstk_path
    ncpu = args.ncpu
    niter = args.niter
    mask_type = args.mask_type
    new_mask = args.new_mask
    md = Metadata(args.md_path)

    seqfish_config = importlib.import_module(args.cword_config)
    bitmap = seqfish_config.bitmap
    normalized_gene_vectors = seqfish_config.norm_gene_codeword_vectors

    hybedatas = [(i, HybeData(os.path.join(cstk_path, i)))
                 for i in os.listdir(cstk_path)
                 if os.path.isdir(os.path.join(cstk_path, i))]

    # Note: preceding blocks and this can be noisy if restarted after a crash, etc.
    with multiprocessing.Pool(ncpu) as ppool:
        failed_positions = []
        for i in range(niter):
            print('N Positions left: ', len(hybedatas))
Example #21
def __init__(self):
    ''' Initializes TV-guide object '''
    self._favorites = Favorites()
    self._resumepoints = ResumePoints()
    self._metadata = Metadata(self._favorites, self._resumepoints)
    install_opener(build_opener(ProxyHandler(get_proxies())))
Example #22
def main():
    args = parse_args()
    init_log(args.log)
    with open(args.app) as app_file:
        # parse and validate the requested data application JSON file
        application = Application(app_file)
        logging.info("Input data application parsed: {}".format(args.app))
        # Create output directory for the results
        application_dir = create_app_dir(application)
        # check what data types are allowed for this application
        allowed_data_types = application.allowed_data_types()
        logging.info("Allowed data types: {}".format(
            ' '.join(allowed_data_types)))
        if len(allowed_data_types) > 0:
            # Get all the sample metadata for all requested cohorts
            requested_cohorts = application.cohorts()
            metadata = Metadata(args.data, requested_cohorts)
            logging.info("Metadata collected for requested cohorts: {}".format(
                ' '.join(requested_cohorts)))
            metadata_sample_ids = sorted(metadata.get_sample_ids())
            logging.info("Metadata for sample IDs: {}".format(
                ' '.join(metadata_sample_ids)))
            # Filter the sample metadata based on patient consent
            metadata.filter_consent(args.consent, allowed_data_types)
            logging.warning("Consent not handled yet. FIXME")
            # Find all the file paths for requested file types for each
            # consented sample
            requested_file_types = application.file_types()
            logging.info("Requested file types: {}".format(
                ' '.join(requested_file_types)))
            fastqs, bams, bais, vcfs = get_files(args.data,
                                                 requested_file_types,
                                                 metadata)
            logging.info("VCF files selected:\n{}".format('\n'.join(vcfs)))
            logging.info("BAM files selected:\n{}".format('\n'.join(bams)))
            logging.info("BAI files selected:\n{}".format('\n'.join(bais)))
            logging.info("FASTQ files selected:\n{}".format('\n'.join(fastqs)))
            output_files = []
            if 'Anonymised' in allowed_data_types:
                # generate random IDs for all output samples
                randomised_ids = make_random_ids(args.usedids,
                                                 metadata.sample_ids)
                metadata.anonymise(randomised_ids)
                metadata.write(args.metaout)
                logging.info("Anonymised metadata written to: {}".format(
                    args.metaout))
                new_vcfs = anonymise_files(vcfs, randomised_ids,
                                           application_dir, VCF_filename,
                                           vcf_edit)
                new_bams = anonymise_files(bams, randomised_ids,
                                           application_dir, BAM_filename,
                                           bam_edit)
                # BAIs and FASTQs are just sym-linked to output with randomised name
                new_bais = anonymise_files(bais, randomised_ids,
                                           application_dir, BAI_filename)
                new_fastqs = anonymise_files(fastqs, randomised_ids,
                                             application_dir, FASTQ_filename)
                output_files.extend(new_vcfs + new_bams + new_bais +
                                    new_fastqs)
                logging.info("Output files are anonymised")
            elif 'Re-identifiable' in allowed_data_types:
                new_links = link_files(application_dir,
                                       vcfs + bams + bais + fastqs)
                output_files.extend(new_links)
                logging.info(
                    "Files linked in directory: {}".format(application_dir))
                metadata.write(args.metaout)
                logging.info("Output files are re-identifiable")
            else:
                print_error(
                    "Allowed data is neither anonymised nor re-identifiable")
                exit(ERROR_BAD_ALLOWED_DATA)
            logging.info("Generating MD5 checksums on output files")
            md5_files(args.md5, output_files)
        else:
            logging.warning("No data available for this application")
Example #23
def FromChat(chat, min_period, max_period, logger):
    meta = Metadata(chat.id, chat.type, get_chat_title(chat))
    vocab = Generator()
    return Reader(meta, vocab, min_period, max_period, logger)
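Example #24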
def test_hash_length(self):
    """
    Tests the length of the md5 hash
    """
    meta = Metadata(0, 0, 0, DIGEST)
    self.assertTrue(len(DIGEST) == len(meta.hash))
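Example #25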
        print('Starting...', 'already done ', str(len(doneso)), ', but ', str(len(all_images)), 'left')
        with open(os.open(fn, os.O_CREAT | os.O_WRONLY, 0o775), 'a') as f:
            for result in p.imap(pfunc, all_images, chunksize=chunksize):
                f.write(str(result)+'\n')
        tend = time.time()
    print('Finished processing ', str(len(all_images)), 'images in ', str(tend-tstart), 'seconds.')


if __name__ == "__main__":
    ncpu = args.ncpu
    md_path = args.md_path
    out_path = args.out_path
    if isinstance(ncpu, list):
        assert(len(ncpu)==1)
        ncpu = ncpu[0]
    md = Metadata(md_path)
    print('Number of images total: ', len(md.image_table.filename))
    base_path = md.base_pth
    if not base_path[-1]=='/':
        base_path=base_path+'/'
    #print(base_path)
    fn = os.path.join(out_path, 'processing.log')
    chunksize=100
    os.environ['MKL_NUM_THREADS'] = '1'
    os.environ['GOTO_NUM_THREADS'] = '1'
    os.environ['OMP_NUM_THREADS'] = '1'
    #print(out_path)
    if not os.path.exists(out_path):
        os.makedirs(out_path)
    process_wrapper(md_path, fn, ncpu, chunksize)
Example #26
def __init__(self, _favorites, _resumepoints):
    """Constructor for the ApiHelper class"""
    self._favorites = _favorites
    self._resumepoints = _resumepoints
    self._metadata = Metadata(_favorites, _resumepoints)
    install_opener(build_opener(ProxyHandler(get_proxies())))