Example #1
def main():
    parser = argparse.ArgumentParser(description='Formatter for C language')
    parser.add_argument('--train', dest='train', type=str, help='Train classifiers on the given folder')
    parser.add_argument('--classifier_filename', dest='classifier_filename', type=str, default='linux_classificators.data',
                        help='File in which to save the classifier')
    parser.add_argument('--clftab', dest='clftab', type=str,
                        choices=['solving_tree', 'kneighbors', 'svm', 'random_forest'], default='random_forest',
                        help='Type of classifier for tabs')
    parser.add_argument('--clfspace', dest='clfspace', type=str,
                        choices=['solving_tree', 'kneighbors', 'svm', 'random_forest'], default='random_forest',
                        help='Type of classifier for spaces')
    parser.add_argument('--clfnl', dest='clfnewline', type=str,
                        choices=['solving_tree', 'kneighbors', 'svm', 'random_forest'], default='random_forest',
                        help='Type of classifier for newlines')
    parser.add_argument('--load_clfs', dest='clfs', type=str, help='Load previously saved classifiers')
    parser.add_argument('file', help='File to process', nargs='?')
    args = parser.parse_args()

    clfs = None
    if args.train is not None:
        clfs = learning.generate_classifiers(args)
        dumper.dump(clfs, learning.vectorizer, args.classifier_filename)

    elif args.clfs is not None:
        data = dumper.load(args.clfs)
        clfs = data['classifiers']
        learning.vectorizer = data['vectorizer']

    if args.file is not None:
        print("Processing file...")
        data = learning.format_file(args.file, clfs['newline'], clfs['space'], clfs['tab'])
        print(data)
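A hypothetical invocation of this formatter CLI (the script name, training folder, and input file below are assumed, not taken from the source):

# Hypothetical command lines for the argparse interface above:
#   python formatter.py --train ./linux_src --classifier_filename linux_classificators.data
#   python formatter.py --load_clfs linux_classificators.data some_file.c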
Example #2
File: clsURL-orig.py Project: panlm/NTNX
 def DeleteURL(self):
     #response.text type is unicode; json.dumps type is str
     try:
         response = requests.delete(self.vURL, headers=self.vHeaders, verify=False, timeout=15)
         response.raise_for_status()
     except requests.exceptions.RequestException as e:  # This is the correct syntax
         print(e)
         #full = json.loads(unicode(response.text))
         #t1 = pyjq.all('. | { message }', full)
         #t2 = pyjq.all('.error_code | { code }', full)
         #print 'error(%s): %s' % (t2[0]['code'], t1[0]['message'])
         #sys.exit(9)
     #print json.dumps(response.json(), indent=2)
     if self.vIsDebug:
         dumper.dump(response)
     if response.text:
         return {
             'status_code': response.status_code,
             'text': response.json()
         }
     else:
         return {
             'status_code': response.status_code,
             'text': 'NULL'
         }
Example #3
 def contextMenuEvent(self, event):
     dumper.dump(event)
     contextMenu = QtWidgets.QMenu(self)
     newAct = contextMenu.addAction("New")
     openAct = contextMenu.addAction("Open")
     quitAct = contextMenu.addAction("Quit")
     action = contextMenu.exec_(self.mapToGlobal(event.pos()))
     if action == quitAct:
         self.close()
Example #4
def main():
    Great_Room = LogiHub()
    current = Great_Room.hub.currentactivity()
    print(current)
    #try turning volume down
    #Great_Room.hub.sendkey(device = '60692019',key = 'VolumeDown')
    # Great_Room.get_hub()
    # print out configuration
    config = Great_Room.hub.getconfig()
    #volume up
    dumper.dump(config['activity'][4]["controlGroup"][1]['function'][1])
Example #5
def jsonpath_value(obj, path):
    if '.' in path:
        try:
            elem = parse(path).find(obj)[0].value
        except IndexError as e:
            print(f'{e}: {path}')
            dump(obj)
            elem = None
    else:
        elem = obj.get(path)
    return elem
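A usage sketch for jsonpath_value, assuming `parse` comes from jsonpath_ng and `dump` from the dumper package (the excerpt does not show its imports):

# Assumed imports for the function above:
from jsonpath_ng import parse
from dumper import dump

doc = {'a': {'b': 42}, 'top': 7}
print(jsonpath_value(doc, 'a.b'))  # 42 -- dotted paths go through JSONPath
print(jsonpath_value(doc, 'top'))  # 7 -- a plain key falls back to dict.get()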
Example #6
def run2(opts):
    try:
        api = finding(debug=opts.debug, appid=opts.appid, config_file=opts.yaml)
        
        response = api.execute('findItemsByProduct', 
          '<productId type="ReferenceID">53039031</productId><paginationInput><entriesPerPage>1</entriesPerPage></paginationInput>')
        
        dump(api)

    except ConnectionError as e:
        print(e)
        print(e.response.dict())
Example #7
def categoryInfo(opts):

    try:
        api = Shopping(debug=opts.debug, appid=opts.appid, config_file=opts.yaml,
                       warnings=True)

        response = api.execute('GetCategoryInfo', {'CategoryID': '63863'})

        dump(api, full=False)
    
    except ConnectionError as e:
        print(e)
        print(e.response.dict())
Example #8
def main():

    options = parse_command_line()

    OAUTH = utils.AUTH(options.server)

    if options.debug:
        dumper.dump(OAUTH)
        return

    ok_to_proceed = ( hasattr(OAUTH,'user') and
                      hasattr(OAUTH,'password') and
                      hasattr(OAUTH,'url') and hasattr(OAUTH,'wsdl'))

    if ok_to_proceed:

        url = urljoin(getattr(OAUTH,'url'), getattr(OAUTH,'wsdl'), allow_fragments=True)

        server = SOAPpy.WSDL.Proxy(url)

        if server is not None:

            token = authentication( server = server,
                                    user = getattr(OAUTH,'user'),
                                    password = getattr(OAUTH,'password'))


            if token is not None:

                try:
                    objects = DISPATCH[options.action]( server = server,
                                                        token  = token,
                                                        id     = options.id,
                                                        debug  = options.debug )

                    if objects is not None:
                        print(objects)

                except InsufficientArguments as e:
                    raise SystemExit(e)
                except AuthenticationException as e:
                    raise SystemExit(e)
                except Exception as e:
                    raise SystemExit(e)

            else:
                raise SystemExit('Failed to get token')
        else:
            raise SystemExit('Failed to create server')

    return
Example #9
 def __init__(self, thisyear):
     self.thisyear = thisyear
     try:
         self.jourlist = self.getJourlist()    # get the day numbers + dates as dict
         self.etudes = self.getEtudelist()     # get the schedule of study periods
         self.datelist = self.makeDatelist()   # produce datelist for ical input
         ical.ICAL.__init__(self, self.datelist.split('\n'))  # prime ical class with data
     except Exception:
         import dumper
         dumper.dump(self)  # do a big data dump!
Example #10
    def match(self):
        matcher = audfprint_match.Matcher()
        matcher.find_time_range = True
        matcher.verbose = 1
        matcher.max_returns = 100

        analyzer = audfprint_analyze.Analyzer()
        analyzer.n_fft = 512
        analyzer.n_hop = analyzer.n_fft // 2  # hop size must stay an integer under Python 3
        analyzer.shifts = 1
        # analyzer.exact_count = True
        analyzer.density = 20.0
        analyzer.target_sr = 11025

        hash_tab = hash_table.HashTable("./samples.pklz")
        hash_tab.params['samplerate'] = analyzer.target_sr

        qry = "./Samples/viral.afpt"
        rslts, dur, nhash = matcher.match_file(analyzer, hash_tab,
                                               "./Samples/viral.afpt", 0)
        t_hop = analyzer.n_hop / float(analyzer.target_sr)
        qrymsg = qry + (' %.1f ' % dur) + "sec " + str(nhash) + " raw hashes"

        msgrslt = []
        if len(rslts) == 0:
            nhashaligned = 0
            msgrslt.append("NOMATCH " + qrymsg)
        else:
            for (tophitid, nhashaligned, aligntime, nhashraw, rank, min_time,
                 max_time) in rslts:
                # msg = ("Matched {:6.1f} s starting at {:6.1f} s in {:s}"
                #            " to time {:6.1f} s in {:s}").format(
                #         (max_time - min_time) * t_hop, min_time * t_hop, qry,
                #         (min_time + aligntime) * t_hop, hash_tab.names[tophitid])
                msg = (
                    "Matched {:6.1f} s starting at {:6.1f} s in {:s}"
                    " to time {:6.1f} s in {:n}; max {:6.1f} min {:6.1f} align {:6.1f} hop {:6.1f}"
                ).format(
                    (max_time - min_time) * t_hop,
                    min_time * t_hop,
                    qry,
                    (min_time + aligntime) * t_hop,
                    tophitid,
                    max_time * t_hop,
                    min_time * t_hop,
                    aligntime * t_hop,
                    t_hop)

                msgrslt.append(msg)
        dumper.dump(msgrslt)
Example #11
def do_dump_scalars():
    dump(1)
    dump('a')
    dump("foo")
    dump('''string
with a newline''')
    return "1'a''foo''string\\nwith a newline'"
Example #12
 def store(self):
     now = datetime.datetime.now()
     total = 0
     for site in self.sites:
         v = getattr(self, site)
         m = v()
         num = m.replace("$", "")
         total += Decimal(num)
         site_list = dumper.load(site, silent=True, path=BASE_DIR) or []
         site_list.append({'site': site, 'datetime': now, 'total': num})
         dumper.dump(site_list, site, path=BASE_DIR)
     total_list = dumper.load('total', silent=True, path=BASE_DIR) or []
     total_list.append({'site': total, 'datetime': now, 'total': total})
     dumper.dump(total_list, "total", path=BASE_DIR)
     print "store file {}".format(now)
Example #13
 def newlistbox(self,choice):
     if choice == 1:
         dumper.dump(self.nimilista)
         self.nimilista = {}
         a = tkinter.simpledialog.askstring('Anna etsittävän tiedoston nimi', 'Tiedoston nimi:')
         self.nimilista = dumper.specificload(a)
         if self.nimilista == {}:
             tkinter.messagebox.showinfo('File not found','Specific file not found')
             self.nimilista = dumper.load()
         self.listbox.delete(0,'end')
         for i in self.nimilista:
             self.listbox.insert('end',i)
     if choice == 2:
         a = tkinter.simpledialog.askstring('Anna tallennettavan tiedoston nimi','Tiedoston nimi: ')
         dumper.specificdump(self.nimilista,a)
Example #14
def main():
    parser = argparse.ArgumentParser(
        description='run solver',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('input_file_paths',
                        nargs='+',
                        type=str,
                        help='input file paths e.g. a_example.in')
    parser.add_argument('--out',
                        dest="output_file_path",
                        default='out',
                        type=str,
                        help='output file path e.g. a_example.out')
    parser.add_argument('--solver',
                        type=str,
                        default='dummy',
                        help='solver name e.g. <solver>.py')
    parser.add_argument('--debug',
                        action='store_true',
                        help='for debug purpose')
    args = parser.parse_args()
    set_log_level(args)

    for input_file_path in args.input_file_paths:
        start = timer()
        images = parse(input_file_path)
        end = timer()

        logging.debug('parsing input took %s s', end - start)

        start = timer()
        solver = get_solver(args.solver)
        slides = solver.solve(images)
        end = timer()

        logging.debug('solving took %s s', end - start)

        start = timer()
        file_name, _ = os.path.splitext(input_file_path)
        dump(slides, file_name + '.out')
        end = timer()

        logging.debug('dumping took %s s', end - start)
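A hypothetical invocation of the solver runner above (the script name and input file names are assumed):

# Hypothetical command line:
#   python run.py a_example.in b_example.in --solver dummy --debug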
Example #15
def run_unicode(opts):

    try:
        api = finding(debug=opts.debug, appid=opts.appid,
                      config_file=opts.yaml, warnings=True)

        api_request = {
            'keywords': u'Kościół',
        }

        response = api.execute('findItemsAdvanced', api_request)
        for i in response.reply.searchResult.item:
            if i.title.find(u'ś') >= 0:
                print("Matched: %s" % i.title)
                break

        dump(api)

    except ConnectionError as e:
        print(e)
        print(e.response.dict())
Example #16
 def handle_item(ctx, drive_file, path):
     file_attr = FileAttr(drive_file, ctx.user)
     file_attr.compare_metadata_to_local_file(drive_file)
     file_attr.compare_YAML_metadata_MD5(drive_file)
     if FLAGS.revisions:
         verify_revisions(ctx, drive_file)
     if drive_file['status'] == 'INVALID':
         print(22 * '_', ' drive_file ', dirent.name)
         dump(drive_file)
         print(11 * '_', ' file attr ', dirent.name)
         dump(file_attr)
     data = jsonpath_list(drive_file, metadata_names)
     if writer:
         writer.writerow(data)
     if output_format:
         print(
             output_format.format(
                 *[str(i) for i in data]).rstrip())
     if (FLAGS.diffs and drive_file.get('yamlMD5Match')
             == 'MISMATCH'):
         print_obj_diffs(drive_file,
                         file_attr.metadata_file)
Example #17
def enumerate(base, pos, arch):
    version = int(arch.split("_")[1])
    for i in range(1 << len(pos)):
        bits = 0x0
        enc = base
        # Express i in binary
        for j in range(len(pos)):
            bits = (((i >> j) & 0x1) << pos[j]) | bits
            enc = enc & (~(1 << pos[j]))
        dump_file = dump("0x{:016x}".format(enc | bits), arch)
        if dump_file and dump_file.find("?") == -1 and dump_file.find("error") == -1 and dump_file.find("INVALID") == -1:
            line = dump_file.split("\n")
            if version < 40:
                line_inst = line[1].split()
            else:
                line_inst = line[5].split()
            line_inst.pop(0)
            logging.info("0b{:064b}".format(bits) + ": " + " ".join(line_inst))
Example #18
def pull(package, dest):
    try:
        # Directory pull to be implemented in ppadb, using standalone binary
        # output = self.device.pull("/data/data/%s %s" % (package, dest))
        try:
            cmd = ['adb', 'pull', "/data/data/%s" % package, dest]
            adb = subprocess.Popen(cmd)
            adb.wait()
        except Exception:
            print("[-] Error while dumping app data.")

        filelist = dumper.fast_scandir(dest)
        dumper.dump(filelist,'xml',dest)
        dumper.dump(filelist,'sqlite',dest)
        dumper.dump(filelist,'json',dest)

    except FileNotFoundError as e:
        print(e)
Example #19
    def annotate(self, conllinput=None, language=None):
        """ Run the semantic role labelling

        :var conllinput: string -- text to annotate formated in the CoNLL
                                   format. If None, annotate

        Return the same string than conllinput but with new colums
        corresponding to the frames and roles found
        """
        self.logger.info("annotate: {} {}".format(language, conllinput))
        model = probabilitymodel.ProbabilityModel(self.verbnet_classes, 0)
        if language is not None:
            options.Options.language = language

        tmpfile = None
        if conllinput is not None:
            if options.Options.debug:
                tmpfile = tempfile.NamedTemporaryFile(delete=False)
                self.logger.error('Debug mode: will not delete temporary file {}'.format(tmpfile.name))
            else:
                tmpfile = tempfile.NamedTemporaryFile(delete=True)
            tmpfile.write(bytes(conllinput, 'UTF-8'))
            tmpfile.seek(0)
            options.Options.conll_input = tmpfile.name
            options.Options.argument_identification = True
        # self.logger.debug("Annotating {}...".format(conllinput[0:50]))
        all_annotated_frames = []
        all_vn_frames = []

        self.logger.info("Loading gold annotations "
                         "and performing frame matching...")
        # annotated_frames: list of FrameInstance
        # vn_frames: list of VerbnetFrameOccurrence
        for annotated_frames, vn_frames in corpuswrapper.get_frames(
                options.Options.corpus,
                self.verbnet_classes,
                self.frameNet,
                options.Options.argument_identification):
            all_matcher = []
            #
            # Frame matching
            #
            data_restr = NoHashDefaultDict(lambda: Counter())
            assert len(annotated_frames) == len(vn_frames)

            # gold_frame: FrameInstance
            # frame_occurrence: VerbnetFrameOccurrence
            for gold_frame, frame_occurrence in zip(annotated_frames,
                                                    vn_frames):
                if gold_frame.predicate.lemma not in self.frames_for_verb:
                    errorslog.log_vn_missing(gold_frame)
                    continue

                stats.stats_data["frames_with_predicate_in_verbnet"] += 1

                stats.stats_data["args"] += len(gold_frame.args)
                stats.stats_data["args_instanciated"] += len(
                    [x for x in gold_frame.args if x.instanciated])

                num_instanciated = len(
                    [x for x in gold_frame.args if x.instanciated])
                predicate = gold_frame.predicate.lemma

                if gold_frame.arg_annotated:
                    stats.stats_data["args_kept"] += num_instanciated

                stats.stats_data["frames"] += 1

                # Check that FrameNet frame slots have been mapped to
                # VerbNet-style slots
                if frame_occurrence.num_slots == 0:
                    errorslog.log_frame_without_slot(gold_frame,
                                                     frame_occurrence)
                    frame_occurrence.matcher = None
                    continue

                errorslog.log_frame_with_slot(gold_frame, frame_occurrence)
                stats.stats_data["frames_mapped"] += 1

                matcher = framematcher.FrameMatcher(
                    frame_occurrence, options.Options.matching_algorithm)
                frame_occurrence.matcher = matcher
                all_matcher.append(matcher)

                frames_to_be_matched = []
                for verbnet_frame in sorted(self.frames_for_verb[predicate]):
                    if options.Options.passivize and gold_frame.passive:
                        for passivized_frame in verbnet_frame.passivize():
                            frames_to_be_matched.append(passivized_frame)
                    else:
                        frames_to_be_matched.append(verbnet_frame)

                # Actual frame matching
                matcher.perform_frame_matching(frames_to_be_matched)

                if options.Options.wordnetrestr:
                    matcher.restrict_headwords_with_wordnet()

                # Update semantic restrictions data (but take no decision)
                for i, restr in matcher.get_matched_restrictions():
                    word = frame_occurrence.headwords[i]['top_headword']
                    if restr.logical_rel == "AND":
                        for subrestr in restr.children:
                            data_restr[subrestr].update([word])
                    else:
                        data_restr[restr].update([word])

                # Update probability model data (but take no decision)
                vnclass = model.add_data_vnclass(matcher)
                if not options.Options.bootstrap:
                    for roles, slot_type, prep in zip(
                        frame_occurrence.roles, frame_occurrence.slot_types,
                        frame_occurrence.slot_preps
                    ):
                        if len(roles) == 1:
                            model.add_data(slot_type, next(iter(roles)),
                                                prep, predicate, vnclass)

                if options.Options.debug and set() in frame_occurrence.roles:
                    log_debug_data(gold_frame, frame_occurrence, matcher,
                                   frame_occurrence.roles,
                                   self.verbnet_classes)

            if options.Options.semrestr:
                for matcher in all_matcher:
                    matcher.handle_semantic_restrictions(data_restr)

            all_vn_frames.extend(vn_frames)
            all_annotated_frames.extend(annotated_frames)

        #
        # Probability models
        #
        self.logger.info("Probability models...")
        if options.Options.bootstrap:
            self.logger.info("Applying bootstrap...")
            bootstrap_algorithm(all_vn_frames, model,
                                self.verbnet_classes)
        elif options.Options.probability_model is not None:
            self.logger.info("Applying probability model...")
            for frame_occurrence in all_vn_frames:
                # Commented out a version that only allowed possible role
                # combinations after each restriction
                # for i in range(frame_occurrence.num_slots):
                #     roles_for_slot = frame_occurrence.roles[i]
                for i, roles_for_slot in enumerate(frame_occurrence.roles):
                    if len(roles_for_slot) > 1:
                        new_role = model.best_role(
                            roles_for_slot,
                            frame_occurrence.slot_types[i],
                            frame_occurrence.slot_preps[i],
                            frame_occurrence.predicate,
                            options.Options.probability_model)
                        if new_role is not None:
                            frame_occurrence.restrict_slot_to_role(i, new_role)
                frame_occurrence.select_likeliest_matches()

            if options.Options.debug:
                display_debug(options.Options.n_debug)
        else:
            self.logger.info("No probability model")

        if options.Options.conll_input is not None:
            self.logger.info("\n## Dumping semantic CoNLL...")
            semantic_appender = ConllSemanticAppender(
                options.Options.conll_input)
            # vn_frame: VerbnetFrameOccurrence
            for vn_frame in all_vn_frames:
                if vn_frame.best_classes():
                    if options.Options.framelexicon == FrameLexicon.VerbNet:
                        semantic_appender.add_verbnet_frame_annotation(vn_frame)  # noqa
                    elif options.Options.framelexicon == FrameLexicon.FrameNet:
                        semantic_appender.add_framenet_frame_annotation(
                            self.role_matcher.possible_framenet_mappings(vn_frame))  # noqa
                    else:
                        self.logger.error("Error: unknown frame lexicon for "
                                          "output {}".format(
                                              options.Options.framelexicon))
            if options.Options.conll_output is None:
                self.logger.debug('\n{}'.format(str(semantic_appender)))
                return str(semantic_appender)
            else:
                semantic_appender.dump_semantic_file(
                    options.Options.conll_output)

        else:
            self.logger.info("\n## Evaluation")
            stats.stats_quality(
                all_annotated_frames, all_vn_frames,
                self.frames_for_verb, self.verbnet_classes,
                options.Options.argument_identification)
            stats.display_stats(options.Options.argument_identification)

            if options.Options.dump:
                dumper.dump(options.Options.dump_file,
                            stats.annotated_frames_stats)

        return ""
Example #20
    parser.add_argument('--url', action="store", dest='vURL', default='')
    parser.add_argument('--urltype', action="store", dest='vURLType', default='tasks')
    param = parser.parse_args()

    if param.vTaskUUID == '':
        print "Usage: " + sys.argv[0] + " --taskuuid="
        sys.exit(1)

    data = GetURL(
        debug=param.vIsDebug, 
        hostname=param.vHostname, port=param.vPort, 
        username=param.vUsername, password=param.vPassword, 
        snapshotuuid=param.vSnapshotUUID, 
        taskuuid=param.vTaskUUID, 
        imageuuid=param.vImageUUID, 
        url=param.vURL, urltype=param.vURLType
        )
    d = json.loads(str(data))
    if 'myerror' in d:
        print(json.dumps(d, indent=4))
    else:
        if param.vIsDebug:
            print(d['uuid'], d['operation_type'], d['progress_status'])
            print(json.dumps(d))
            dumper.dump(d)
        
        table = PrettyTable(['UUID', 'Operation_Type', 'Progress_Status'])
        table.add_row([d['uuid'], d['operation_type'], d['progress_status']])
        print(table)

Example #21
    def annotate(self, conllinput=None, language=None):
        """ Run the semantic role labelling

        :var conllinput: string -- text to annotate formated in the CoNLL
                                   format. If None, annotate

        Return the same string than conllinput but with new colums
        corresponding to the frames and roles found
        """
        self.logger.info("annotate: {} {}".format(language, conllinput))
        model = probabilitymodel.ProbabilityModel(self.verbnet_classes, 0)
        if language is not None:
            options.Options.language = language

        tmpfile = None
        if conllinput is not None:
            if options.Options.debug:
                tmpfile = tempfile.NamedTemporaryFile(delete=False)
                self.logger.error(
                    'Debug mode: will not delete temporary file {}'.format(
                        tmpfile.name))
            else:
                tmpfile = tempfile.NamedTemporaryFile(delete=True)
            tmpfile.write(bytes(conllinput, 'UTF-8'))
            tmpfile.seek(0)
            options.Options.conll_input = tmpfile.name
            options.Options.argument_identification = True
        # self.logger.debug("Annotating {}...".format(conllinput[0:50]))
        all_annotated_frames = []
        all_vn_frames = []

        self.logger.info("Loading gold annotations "
                         "and performing frame matching...")
        # annotated_frames: list of FrameInstance
        # vn_frames: list of VerbnetFrameOccurrence
        for annotated_frames, vn_frames in corpuswrapper.get_frames(
                options.Options.corpus, self.verbnet_classes, self.frameNet,
                options.Options.argument_identification):
            all_matcher = []
            #
            # Frame matching
            #
            data_restr = NoHashDefaultDict(lambda: Counter())
            assert len(annotated_frames) == len(vn_frames)

            # gold_frame: FrameInstance
            # frame_occurrence: VerbnetFrameOccurrence
            for gold_frame, frame_occurrence in zip(annotated_frames,
                                                    vn_frames):
                if gold_frame.predicate.lemma not in self.frames_for_verb:
                    errorslog.log_vn_missing(gold_frame)
                    continue

                stats.stats_data["frames_with_predicate_in_verbnet"] += 1

                stats.stats_data["args"] += len(gold_frame.args)
                stats.stats_data["args_instanciated"] += len(
                    [x for x in gold_frame.args if x.instanciated])

                num_instanciated = len(
                    [x for x in gold_frame.args if x.instanciated])
                predicate = gold_frame.predicate.lemma

                if gold_frame.arg_annotated:
                    stats.stats_data["args_kept"] += num_instanciated

                stats.stats_data["frames"] += 1

                # Check that FrameNet frame slots have been mapped to
                # VerbNet-style slots
                if frame_occurrence.num_slots == 0:
                    errorslog.log_frame_without_slot(gold_frame,
                                                     frame_occurrence)
                    frame_occurrence.matcher = None
                    continue

                errorslog.log_frame_with_slot(gold_frame, frame_occurrence)
                stats.stats_data["frames_mapped"] += 1

                matcher = framematcher.FrameMatcher(
                    frame_occurrence, options.Options.matching_algorithm)
                frame_occurrence.matcher = matcher
                all_matcher.append(matcher)

                frames_to_be_matched = []
                for verbnet_frame in sorted(self.frames_for_verb[predicate]):
                    if options.Options.passivize and gold_frame.passive:
                        for passivized_frame in verbnet_frame.passivize():
                            frames_to_be_matched.append(passivized_frame)
                    else:
                        frames_to_be_matched.append(verbnet_frame)

                # Actual frame matching
                matcher.perform_frame_matching(frames_to_be_matched)

                if options.Options.wordnetrestr:
                    matcher.restrict_headwords_with_wordnet()

                # Update semantic restrictions data (but take no decision)
                for i, restr in matcher.get_matched_restrictions():
                    word = frame_occurrence.headwords[i]['top_headword']
                    if restr.logical_rel == "AND":
                        for subrestr in restr.children:
                            data_restr[subrestr].update([word])
                    else:
                        data_restr[restr].update([word])

                # Update probability model data (but take no decision)
                vnclass = model.add_data_vnclass(matcher)
                if not options.Options.bootstrap:
                    for roles, slot_type, prep in zip(
                            frame_occurrence.roles,
                            frame_occurrence.slot_types,
                            frame_occurrence.slot_preps):
                        if len(roles) == 1:
                            model.add_data(slot_type, next(iter(roles)), prep,
                                           predicate, vnclass)

                if options.Options.debug and set() in frame_occurrence.roles:
                    log_debug_data(gold_frame, frame_occurrence, matcher,
                                   frame_occurrence.roles,
                                   self.verbnet_classes)

            if options.Options.semrestr:
                for matcher in all_matcher:
                    matcher.handle_semantic_restrictions(data_restr)

            all_vn_frames.extend(vn_frames)
            all_annotated_frames.extend(annotated_frames)

        #
        # Probability models
        #
        self.logger.info("Probability models...")
        if options.Options.bootstrap:
            self.logger.info("Applying bootstrap...")
            bootstrap_algorithm(all_vn_frames, model, self.verbnet_classes)
        elif options.Options.probability_model is not None:
            self.logger.info("Applying probability model...")
            for frame_occurrence in all_vn_frames:
                # Commented out a version that only allowed possible role
                # combinations after each restriction
                # for i in range(frame_occurrence.num_slots):
                #     roles_for_slot = frame_occurrence.roles[i]
                for i, roles_for_slot in enumerate(frame_occurrence.roles):
                    if len(roles_for_slot) > 1:
                        new_role = model.best_role(
                            roles_for_slot, frame_occurrence.slot_types[i],
                            frame_occurrence.slot_preps[i],
                            frame_occurrence.predicate,
                            options.Options.probability_model)
                        if new_role is not None:
                            frame_occurrence.restrict_slot_to_role(i, new_role)
                frame_occurrence.select_likeliest_matches()

            if options.Options.debug:
                display_debug(options.Options.n_debug)
        else:
            self.logger.info("No probability model")

        if options.Options.conll_input is not None:
            self.logger.info("\n## Dumping semantic CoNLL...")
            semantic_appender = ConllSemanticAppender(
                options.Options.conll_input)
            # vn_frame: VerbnetFrameOccurrence
            for vn_frame in all_vn_frames:
                if vn_frame.best_classes():
                    if options.Options.framelexicon == FrameLexicon.VerbNet:
                        semantic_appender.add_verbnet_frame_annotation(
                            vn_frame)  # noqa
                    elif options.Options.framelexicon == FrameLexicon.FrameNet:
                        semantic_appender.add_framenet_frame_annotation(
                            self.role_matcher.possible_framenet_mappings(
                                vn_frame))  # noqa
                    else:
                        self.logger.error("Error: unknown frame lexicon for "
                                          "output {}".format(
                                              options.Options.framelexicon))
            if options.Options.conll_output is None:
                self.logger.debug('\n{}'.format(str(semantic_appender)))
                return str(semantic_appender)
            else:
                semantic_appender.dump_semantic_file(
                    options.Options.conll_output)

        else:
            self.logger.info("\n## Evaluation")
            stats.stats_quality(all_annotated_frames, all_vn_frames,
                                self.frames_for_verb, self.verbnet_classes,
                                options.Options.argument_identification)
            stats.display_stats(options.Options.argument_identification)

            if options.Options.dump:
                dumper.dump(options.Options.dump_file,
                            stats.annotated_frames_stats)

        return ""
Example #22
 def eventhandler(self):
     if tkinter.messagebox.askokcancel('Lopeta?', 'Haluatko lopettaa ja tallentaa muutokset?'):
         dumper.dump(self.nimilista)
         self.mainframe.quit()  # quit() stops ALL widgets directly
Example #23
File: xmper.py Project: b3tar00t/Xmper
    "\033[1;34;40m[\033[1;31;40m+\033[1;34;40m] \033[1;31;40mGithub : https://github.com/b3tar00t"
)

print(" ")

faker = Faker()

print('\033[1;36;40m ================================================= ')

print(' ')

print(" \033[1;32;40mMale Profile : ")

maleprofile = faker.simple_profile('M')

dumper.dump(maleprofile)

print(' ')

print(' \033[1;36;40m================================================= ')

print(' ')

print(" \033[1;32;40mFemale Profile : ")

femaleprofile = faker.simple_profile('F')

dumper.dump(femaleprofile)

print(' ')
print(" \033[1;32;40mPhone Number : ")
Example #24
def do_dump_json():
    obj = {"httpCode": 200, "extensionData": [{"extensionValue": "egg"}]}
    dump(obj)
    return '''
Example #25
    1,
    2,
    3,
    4,
    5,
    6,
    7,
])))

# swapping example
num_to_alpha = dict(a=1, b=2, c=3)
print(num_to_alpha)
alpha_to_num = {num: alpha for alpha, num in num_to_alpha.items()}
print(alpha_to_num)

for word in "followed by a newline at the end of the".split():
    print(f"Word {word}")

#sets context
print({factorial(x) for x in range(10)})

# dict context
# Double each value in the dictionary
dict1 = {'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 5}
double_dict1 = {k: v * 2 for (k, v) in dict1.items()}
print(dump(double_dict1))
print(dumps(double_dict1))

print([x for x in range(10) if x % 2])
Example #26
    finally:
        # reset the buffer for the next test
        buff.truncate(0)
        buff.seek(0)


if __name__ == "__main__":

    l1 = [3, 5, 'hello']
    t1 = ('uh', 'oh')
    l2 = ['foo', t1]
    d1 = {'k1': 'val1', 'k2': l1, 'k3': l2}

    print("a list: ", dumps(l1), "; a tuple: ", dumps(t1))
    print("a complex list: ")
    dump(l2)
    dump(d1)
    print("same dict, printed from dumps(): ")
    print(dumps(d1))
    dump(19)
    dump("\nMy birth year!\n")

    dumper = Dumper(max_depth=1)
    l = ['foo', ['bar', 'baz', (1, 2, 3)]]
    dumper.dump(l)
    dumper.max_depth = 2
    dumper.dump(l)
    l[1][2] = tuple(range(11))
    dumper.dump(l)
    dumper.max_depth = None
    print(dumper.max_depth)
Example #27
 def test_dump_invalid_data(self):
     with self.assertRaises(TypeError):
         dump('', self.invalid_data, 0)
Example #28
 line_split = line.split()
 # Construct instruction structure
 origin = Inst(line_split)
 # Find the 64-bit encodings
 base = int(origin.enc(), 16)
 origin_operand_types = check_operand_types(origin)
 if len(origin.operands()) and origin_operand_types.find('X') == -1:
     pp = [[] for i in range(len(origin_operand_types))]
     logging.info(origin.op() + " " + origin.modifier())
     logging.info("0b{:064b}".format(base) + ": " +
                  origin.operands())
     for i in range(0, 64):
         mask = 2**i
         newcode = base ^ mask
         # Disassemble the new code
         dump_file = dump("0x{:016x}".format(newcode), arch)
         if dump_file and dump_file.find(
                 "?") == -1 and dump_file.find("error") == -1:
             line = dump_file.split("\n")
             if version < 40:
                 line_inst = line[1].split()
             else:
                 line_inst = line[5].split()
             # [0]: header info, [1] instruction part
             line_inst.pop(0)
             inst = Inst(line_inst, raw=version > 40)
             pos = change(inst, origin)
             if pos >= 0:
                 pp[pos].append(i)
                 logging.info("0b{:064b}".format(newcode) + ": " +
                              inst.operands())
Example #29
        print("Se ha encontrado un pepinillo de Coordenadas guardado ¿Desea cargar pepinillo? S/N")
        load = input()
        if load == 'S' or load == 's':
            placesAcc = pickle.load(f)
            f.close()
            print(highlight, "Pepinillo cargadon con exito!!!! :3", reset)
    except Exception:
        print(highlight, "Pepinillo no encontrado ejecucion normal 2", reset)

    if len(placesAcc) == 0:
        for place in globalPlacesList:
            try:
                points = functions.placePoints(place, bingKey)
                place = Place(place, points[0], points[1][0], points[1][1])
                placesAcc.append(place)
                dumper.dump(place)
            except Exception:
                print(error, "Something went wrong", reset)
                continue

        print('---------------------------------Hciendo el pepinillo Places------------------------')
        f = open(r"Places.pckl", 'wb')
        pickle.dump(placesAcc, f)
        f.close()
    # dumper.dump(placesAcc)

    # ----------------------------Third Step --------------------------------------

    for place in placesAcc:  # places where appointments were made
        # print(ok, "-", place.place, reset)
        # print(ok, "-", place.longitude, reset)
Example #30
File: getData.py Project: ajmal017/ib-3
        d['color'] = 'R'
    elif d['open'] < d['close']:
        d['color'] = 'G'
    return d


#

util.logToConsole(logging.INFO)
ib = IB()
ib.connect("localhost", 4002, clientId=2)

contract = Future('ES', '202006', 'GLOBEX')
ib.qualifyContracts(contract)
ticker = ib.reqMktData(contract, '', False, False)
ib.sleep(1)

de = collections.deque()
for i in range(0, 6):
    d = collector(ticker, ib)
    de.append(d)

# run analysis function.  if it passes, place an order as needed
# if not, re-run collection process and re-analyze

ib.cancelMktData(contract)
ib.disconnect()
for i in de:
    dumper.dump(i)
    print("\n")
Example #31
def do_dumps_multi_values():
    s = dumps(1, " is less than ", 10)  # returns unicode string in py2
    if sys.version_info < (3, 0):
        s = s.encode('ascii', 'replace')  # convert back to regular string
    dump(s)
    return "\"1' is less than '10\""
Example #32
File: cmd_maf.py Project: Excedrin/mafl
def run(bot, command, to, who, args, authed):
    if not command:
        return

    public = to and to[0] == "#"

    noncommand = False

    # in public, cmdchar is necessary
    if public:
        if command[0] == '%':
            command = command[1:]
        else:
            noncommand = True
    else:
        # in private, cmdchar is optional
        if command[0] == '%':
            command = command[1:]

    state = bot.get('maf')

    if command == "reset":
        state = None

    if state is None:
        state = game.Game(rng)

    if noncommand:
        # chatting, active player, reset modkill timer
        state.activity(who)
        return

    if command == "help":
        bot.reply(to, who, "normal commands: %s" % ", ".join(["join","start","go",
                "wait","done","role","testsetup","living","votes","phase","replace",
                "dccchat", "changesetup"]))
        bot.reply(to, who, "mod commands: %s" % ", ".join(["reset",
                "force","forcep","forcenextphase","showsetup","setrole","starttest"]))

    elif public and (command == "join" or command == "start"):
        state.join(to, who, args)

    elif command == "done":
        state.done(who)

    elif command == "role":
        state.fullrolepm(who)

    elif command == "rolepower":
        state.rolepower(who, args)

# mod commands
    elif public and command == "force":
        if len(args) >= 2:
            run(bot, args[1], to, args[0], args[2:], authed)
        state.makefake(who, args)

    elif public and command == "forcep":
        if len(args) >= 2:
            run(bot, args[1], "bot", args[0], args[2:], authed)
        state.makefake(who, args)

    elif public and command == "forcenextphase":
        state.nextphase()

    elif public and command == "replace":
        if len(args) == 2:
            state.replace(args[0], args[1])

    elif public and command == "showsetup" and state.channel:
        state.showsetup(who)
        bot.notice(state.channel, "%s used showsetup"%who)

    elif command == "setrole" and state.channel:
        if state.setrole(who, args):
            bot.notice(state.channel, "%s used setrole"%who)

    elif command == "dump":
        dumper.max_depth = 9
        print(dumper.dump(state))

# informational commands
    elif command == "testsetup":
        bot.reply(to, who, state.testsetup(args))
# public informational commands
    elif public and command == "living":
        state.livingmsg()
    elif public and command == "dead":
        state.deadmsg()
    elif public and command == "votes":
        state.votecount()
    elif public and command == "phase":
        state.phasemsg()
# game start cmds
    elif command == "changesetup":
        state.changesetup(args)
    elif public and command == "wait":
        state.wait()
    elif public and command == "go":
        state.go(args[0] if args else None)

    elif public and command == "starttest":
        state.gotest(to, who, args)

# role commands
    elif command:
        print("game command %s args %s"%(command,args))
        state.tryability(who, public, command, args)

    rungame(bot, public, to, who, state)

    bot.store('maf', state)
Example #33
def work(input_file_name, output_file, section_start):
    global sass_content, ops_bits

    arch = "sm_75"
    # for debug
    # input_file_name = 'gaussian.cubin'
    init_sass_cubin_files(input_file_name, arch)
    logging.basicConfig(format="%(message)s",
                        filename="./log/%s" % output_file,
                        filemode="a",
                        level=logging.INFO)
    logging.info("Time:\t%s" %
                 time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))

    com_lib.kernel_section_start_offset = int(section_start, 16)

    with open("%s.sass" % os.path.splitext(input_file_name)[0], 'r') as fin:
        sass_content = fin.read()
    tmp_result = code_line_reg.findall(sass_content)
    if not tmp_result:
        logging.error("Not found pairs")
        return
    # @todo: check is there any irregular code
    # how many lines will be checked.
    for j, line in enumerate(tmp_result[:1]):
        logging.info("================================")
        logging.info("raw line:\t\t%s" % line[0])
        a_origin_inst = Inst(line)
        instructions.append(a_origin_inst)

        base = a_origin_inst.enc
        bits = 0x0
        origin_operand_types = check_operand_types(a_origin_inst)
        if FLAG_CHECK_OPERAND:
            if len(a_origin_inst.operands) and origin_operand_types:
                logging.info(
                    "Original op and modifier:%s:\t%s" %
                    (a_origin_inst.op, "".join(a_origin_inst.modifier)))
                logging.info("0b{:0128b}".format(base) + ": " +
                             "".join(a_origin_inst.operands))
                logging.info("newcode operand:")
            # if you just want to check operand, uncomment the following else branch
            else:
                continue
        # In Volta and Turing, the instruction code is 128 bits long
        for i in range(0, 128):
            # i from right to left
            mask = 2**i
            newcode = base ^ mask
            dump_file_content = dump("0x{:032x}".format(newcode), arch, j)

            # print(dump_file_content)
            # Compare the disassembly to check which field changes: opcode, operand, or modifier
            if dump_file_content and dump_file_content.find(
                    "?") == -1 and dump_file_content.find("error") == -1:
                tmp_pp, tmp_inst = filter_change(a_origin_inst,
                                                 dump_file_content, base,
                                                 newcode, j, i)
                if tmp_pp is not None:
                    # the ith bit affects tmp_ppth operand
                    a_origin_inst.operand_positions[tmp_pp].append(i)
                    # @todo print the reverse bit
                    logging.info("%s: %d\t%s" %
                                 ("0b{:0128b}".format(newcode), i, " ".join(
                                     tmp_inst.operands)))
            # if len(positions) > 0:
            #     logging.info("0b{:0128b}".format(bits) + ": %s opcode bits %s: ", origin_inst.op, positions)

        if len(a_origin_inst.opcode_positions) > 0:
            ops_bits[a_origin_inst.op] = list(
                set(
                    ops_bits.get(a_origin_inst.op, []) +
                    a_origin_inst.opcode_positions))
        logging.info("Operand combination types: %s", origin_operand_types)
        for i in range(0, len(a_origin_inst.operand_positions)):
            if len(origin_operand_types) > i:
                tmp_type = origin_operand_types[i]
            else:
                tmp_type = 'None'
            logging.info("Operand type: %s", tmp_type)
            logging.info("Encoding: %s", a_origin_inst.operand_positions[i])

    for node in ops_bits:
        logging.info("%s:\t[%s]", node,
                     ",".join(str(x) for x in ops_bits[node]))
Example #34
import os
import gffutils
import warnings
import dumper

# provisioning on ubuntu/xenial64 virtual box:
# apt-get install -y git python3 python-setuptools python3-biopython python3-pip
# pip3 install dumper gffutils

data_dir = os.path.dirname(os.path.realpath(__file__))
test_gff_file = os.path.join(data_dir, 'SAMPLE.gff3.gz')
test_gff_db_file = os.path.join(data_dir, 'gffutils_test.db')

with warnings.catch_warnings():
    warnings.filterwarnings("ignore", "unclosed file <_io\.TextIOWrapper",
                            ResourceWarning, "gffutils", 133)
    warnings.filterwarnings("ignore", "generator '_FileIterator\.",
                            PendingDeprecationWarning, "gffutils", 186)
    warnings.filterwarnings("ignore", "unclosed file <_io\.TextIOWrapper",
                            ResourceWarning, "gffutils", 668)
    test_gff_db = gffutils.create_db(
        test_gff_file,
        dbfn=test_gff_db_file,
        force=True,  # overwrite previous testing db file
        merge_strategy='error',
        keep_order=False,  # True doesn't appear to maintain attribute order :-/  (and turning this off may be faster)
        sort_attribute_values=False)

    for this_feature in test_gff_db.all_features():
        print(dumper.dump(this_feature))
Example #35
import dumper
import dbimporter
import tempfile
import os
import ez_aws

os.environ['TMPDIR'] = '/tmp/'

queue = ez_aws.Queue( 'frontend-logs' )

raw_bucket = ez_aws.Bucket( 'localch-frontend-logs' )
processed_bucket = ez_aws.Bucket( 'localch-frontend-logs-processed' )

for message in queue:
	tmp = tempfile.mkstemp()[1]
	logfile = message.get_body()
	raw_bucket.download_item_to_localfile( logfile, tmp )	
	dumper.dump( tmp, logfile )
	dbimporter.load_and_dump( tmp )
	processed_bucket.upload_localfile_to_item( tmp + '.dump', 'dump_' + logfile )
	processed_bucket.upload_localfile_to_item( tmp + '.paths', 'paths_' + logfile )
	processed_bucket.upload_localfile_to_item( tmp + '.queries', 'queries_' + logfile )
	processed_bucket.upload_localfile_to_item( tmp + '.referers', 'referers_' + logfile )
	processed_bucket.upload_localfile_to_item( tmp + '.uas', 'uas_' + logfile )
#	os.unlink( tmp )
#	os.unlink( tmp + '.processed' )
	queue.delete( message )
Example #36
 def test_dump_valid_data(self):
     dumped_data = dump('', self.valid_data, 0)
     self.assertEqual(dumped_data.strip(), self.valid_data_dumped)