Example no. 1
def workloop(rep):
    print('starting worker')
    import fstore

    pid = str(os.getpid())
    fstore.clearcont()
    pidfile = os.path.join(fstore.WORK, pid)
    open(pidfile, 'w').write(pid)
    try:
        while True:
            rep('entering loop')
            j = fstore.getjob()
            if not j:
                fstore.wait()
                rep('woke up')
                continue
            rep('decoding')
            jid, inps, j = j
            j = str2d(j)
            rep('have job %s' % jid)
            d = Doc()
            for did in inps:
                d.patch(str2d(fstore.getdoc(did)))
            if "_failed" in d:
                r = d
            else:
                j = Job(j['t'], j['p'], j['o'])
                r = j(d)
            rep('finishing %s' % jid)
            r = d2str(r)
            fstore.finish(r, jid)
            rep('done')
    except (SystemExit, KeyboardInterrupt):
        print('exiting worker')
        os.unlink(pidfile)
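
A minimal launch sketch (not from the source; it assumes only that workloop is importable from the module above and takes a reporting callback, as the loop shows):

from multiprocessing import Process

def rep(msg):
    # reporting callback handed to workloop; it receives progress strings
    print('[worker] %s' % msg)

if __name__ == '__main__':
    worker = Process(target=workloop, args=(rep,))
    worker.start()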
Example no. 2
class Job(object):
    def __init__(self, transform='', params=None, outpath=''):
        self.t = transform
        self._t = None
        self.p = Doc()
        if params:
            self.p.update(params, True)
        self.o = outpath or ''

    def __str__(self):
        return "Job: %s\n-----\n%s\n--------\n" % (self.t, str(self.p))

    def gettf(self):
        if self._t is None:
            self._t = blocks.transform(self.t)
        return self._t


    def keys(self, doc):
        ks = self.gettf()(doc, self.p)
        if isinstance(self.o, dict):
            for k in self.o:
                if k.startswith('->'):
                    ks.add(self.o[k])
        if '' in ks:
            ks = set(doc.keys())
        return ks

    def __call__(self, doc, subset=()):
        try:
            d, m = self.gettf()(doc, self.p)
            for s in m:
                report(s)
            dd = doc.subset(subset)
            if self.o:
                dd[self.o] = d
            else:
                dd = dd.fuse(d)
        except Exception as e:
            dd = Doc({'_failed': True,
                      '_traceback': format_exc(e),
                      '_input': doc,
                      '_job': self.todoc()})
        return dd

    def todoc(self, mode='doc'):
        d = Doc()
        d['t'] = self.t
        d['p'] = self.p
        d['o'] = self.o
        if mode == 'doc':
            return d
        elif mode == 'tup':
            return d.astuple()
        else:
            return enc.tojson(d)
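
A small usage sketch (an assumption, not taken from the source): the transform name 'normalize' is hypothetical, and because gettf() is lazy nothing below actually touches blocks.transform:

job = Job(transform='normalize', params={'lower': True}, outpath='clean')
print(job)             # human-readable summary from __str__
d = job.todoc()        # Doc with keys 't', 'p', 'o'
t = job.todoc('tup')   # tuple form via Doc.astuple()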
Example no. 3
 def todoc(self, mode='doc'):
     d = Doc()
     d['t'] = self.t
     d['p'] = self.p
     d['o'] = self.o
     if mode == 'doc':
         return d
     elif mode == 'tup':
         return d.astuple()
     else:
         return enc.tojson(d)
Example no. 4
    def exec_jsdoc(self):
        if self._module is not None:
            try:
                d = getattr(self._module.plugin, 'Doc')(self._config)
            except AttributeError:
                d = Doc(self._config)
        else:
            d = Doc(self._config)

        d.run()

        print 'Successfully built the JSDoc documentation.'
Example no. 5
 def addDoc(self, doc: Doc) -> None:
     """ Adds a document to this table. 
     gives it an _id if need be.
     """
     if doc.hasId():
         if self.getDoc(doc._id):
             self.removeDoc(doc._id)           
     else:
         doc._id = self.newId()   
                 
     self.docs.append(doc)
     self.index.addIndex(doc._id, len(self.docs)-1)
Example no. 6
    def exec_jsdoc(self, plugin):
        try:
            doc = Doc(self._config)
            doc.build_doc()
        except:
            raise

        if plugin:
            try:
                plugin.after_doc()
            except AttributeError:
                pass

        print 'Successfully built the JSDoc documentation.'
Example no. 7
def main():
    print(ai.ALL_RESPONSES)
    while True:
        doc_id = input('doc_id=')
        response = input('response=')
        if database.doExist(database.DOCS, doc_id):
            raise Exception('Error 24154035')
        doc = Doc(getJSON(doc_id))
        doc.response = response
        database.saveDoc(doc)
        database.accOverall(response)
        for tag in doc.tags:
            database.accTagInfo(tag, response)
        print(doc)
Example no. 8
 def __init__(self, transform='', params=None, outpath=''):
     self.t = transform
     self._t = None
     self.p = Doc()
     if params:
         self.p.update(params, True)
     self.o = outpath or ''
Example no. 9
class Root(RestController):
    config = Config()
    crush = Crush()
    doc = Doc()
    mon = Mon()
    osd = Osd()
    pool = Pool()
    request = Request()
    server = Server()

    @expose(template='json')
    def get(self, **kwargs):
        """
        Show the basic information for the REST API
        This includes values like api version or auth method
        """
        return {
            'api_version':
            1,
            'auth':
            'Use "ceph tell mgr restful create_key <key>" to create a key pair, '
            'pass it as HTTP Basic auth to authenticate',
            'doc':
            'See /doc endpoint',
            'info':
            "Ceph Manager RESTful API server",
        }
Example no. 10
class DocHandler(xml.sax.ContentHandler):
    def __init__(self, es):
        self.CurrentData = ""
        self.doc = Doc()
        self.title = ""
        self.abstract = ""
        self.index = 0
        self.es = es

    def startElement(self, tag, attributes):
        self.CurrentData = tag
        #print(tag)
        if tag == "doc":
            self.index = self.index + 1
            print("Doc: ", self.index)
            #print([s.toString() for s in docs])

    def endElement(self, tag):
        #print(tag)
        if tag == "doc":
            self.es.index(index='temp1',
                          doc_type='wikiabstract',
                          id=self.index,
                          body=json.loads(self.doc.toString()))
        elif self.CurrentData == "title":
            self.doc.title = self.title
        elif self.CurrentData == "abstract":
            self.doc.abstract = self.abstract

    def characters(self, content):
        if self.CurrentData == "title":
            self.title = content
        elif self.CurrentData == "abstract":
            self.abstract = content
Example no. 11
    def _load_module(cls, module_name, filepath):
        """ a helper function to make load_plugin pretty. Assumes 'module_name' is in sys.path"""
        global logger
        spec = spec_from_file_location(module_name, filepath)
        module = module_from_spec(spec)
        # TODO - exec module can fail and really muck things up.
        spec.loader.exec_module(module)
        # TODO - testing a module can fail and really muck things up.
        if (Config.get('TEST_ON_LOAD') and getattr(module, '_test', None)
                and type(module._test) is FuncType):
            module._test_result = module._test()
            if module._test_result == SUCCESS:
                logger.info("Imported and tested {} from {}".format(
                    module.__name__, module_name))
            else:
                logger.error(
                    "plugin {} from file {} failed its own test".format(
                        module.__name__, module_name))
                return None
            pass
        else:
            logger.info("Imported {} from {}".format(module.__name__,
                                                     module_name))

        if hasattr(module, 'doc'):
            from doc import Doc
            if type(module.doc) is not dict:
                raise ValueError("a plugin's 'doc' should be a dictionary")
            else:
                try:
                    Doc(**module.doc)
                except Exception as e:
                    logger.exception(
                        "Failed to load Doc from {}".format(filepath))
        return module
Example no. 12
 def test_apiInput(self):
     """ load api_test.py and check the results """
     Config.load_plugin('inputs/api_input.py')
     module = Config.load_plugin('test/api_test.py')
     doc = Doc.from_url("api_test")
     result = doc.query( { 'page' : 1 } )
     self.assertEqual( search( ('reqres_ids', 'name'), result), "George Bluth")
Example no. 13
 def __getattr__(self, k):
     
     if k in ('_opt','__methods__', '_hasattr'):
         return Doc.__getattr__(self, k)
         
     v = getattr(self.__dict__['_data'], k)
     
     if v is not None:
         
         if ( type( v ) == relation ):
             return getattr( v, k )
     
     if not k.startswith('__') and k in dir(self.__dict__['_data']):
         return v
         
     return Doc.__getattr__(self, k)
Example no. 14
    def next(self):
        """Iterator
		"""

        try:
            return Doc(self._collection, self._items.next())
        except:
            raise StopIteration
Example no. 15
def do_query(path, dic):
    d = Doc.from_url(path)
    rv = d.query(dic)
    global queryCount
    queryCount += 1
    print("Query{}: {}".format(queryCount, path))
    pprint(rv)
    print('============')
Example no. 16
 def index_writer(self, file_path):
     with open(file_path, 'r', encoding='utf-8') as f:
         for line in f.readlines():
             key, title, link = line.strip().split('\t\t')
             doc = Doc()
             doc.add('key', key)
             doc.add('title', title)
             doc.add('link', link)
             self.doc_list.append(doc)
     self.index()
Example no. 17
def load_docs(docs_pt):
    bs = []
    print("load docs: " + docs_pt)
    rf = open(docs_pt)
    if not rf:
        print("file not found: " + docs_pt)
    for line in rf.readlines():
        d = Doc(line)
        biterms = []
        d.gen_biterms(biterms)
        # statistic the empirical word distribution
        # for i in range(d.size()):
        #     w = d.get_w(i)
        #     pw_b[w] += 1
        for b in biterms:
            bs.append(b)
    # print(len(bs))
    return bs
Example no. 18
    def test_creation(self):
        self.db = Database("mydatabase")
        t = self.db["mytable"]
        self.assertEqual(t.count(), 0, "no documents yet")

        d = Doc(foo=2, bar=3)
        t.addDoc(d)
        self.assertEqual(t.count(), 1, "1 document now")
        print(d)
Example no. 19
    def save(self):
        '''Save/insert doc
        @TODO: add an acknowledgement so we know whether the save/insert succeeded or not?
        '''

        self.validate()
        
        if self._monga is None:
            raise SuperDocError, "This doc is not bound to a monga object. Bind it first using set_monga(<monga instance>)"
        
        if self._monga.config.get('nometaname') == False:
            # create the metaname for the model
            
            setattr( self.__dict__['_data'], '_metaname_', self.__class__.__name__ )
        
        # reset the error history first, in case it is needed for error checking
        self._monga._db.reset_error_history()

        Doc.save(self)
        
        if self.get_last_error() is not None:
            return None
        
        global RELATION_RECURSION_DEEP, MAX_RECURSION_DEEP
        
        RELATION_RECURSION_DEEP += 1
        
        if RELATION_RECURSION_DEEP < MAX_RECURSION_DEEP:
            
            self._call_relation_attr( '_update_hash' )
            self._call_relation_attr( '_save' )
            
            RELATION_RECURSION_DEEP -= 1
        else:
            raise SuperDocError, "Recursion limit reached, max %d" % MAX_RECURSION_DEEP
        
        # execute pending ops
        if not self._pending_ops.empty():
            self._pending_ops.apply_op_all()
        
        # refresh relation state
        self.__map_relation()
        
        return self
Example no. 20
 def index_writer(self, file_path):
     with open(file_path, 'r', encoding='utf-8') as f:
         for line in f.readlines():
             word, English, miandian, time = line.strip().split('///')
             doc = Doc()
             doc.add('word', word)
             doc.add('English', English)
             doc.add('miandian', miandian)
             # doc.add('time', time)
             self.doc_list.append(doc)
     self.index()
Example no. 21
	def correct(self, txt):
		"""
		given a string, identify the least-common n-gram not present in 'skip'
		and return a list of suggested replacements
		"""
		d = Doc(txt, self.w)
		changes = list(self.suggest(d, 1))
		for ch in changes:
			logger.debug('ch=%s' % (ch,))
			change = [ch[0].ngd]
			logger.debug('change=%s' % (change,))
			d.applyChanges(change)
			logger.debug('change=%s after applyChanges d=%s' % (change, d))
			d = Doc(d, self.w)
			break # FIXME: loops forever
			changes = list(self.suggest(d, 1))
		res = str(d).decode('utf8')
		logger.debug('correct res=%s %s' % (type(res),res))
		return res
Example no. 22
def query(path):
    # simple and it works
    src = None
    try:
        src = Doc.from_url(path)
    except ValueError as e:
        return "Error: <{}>".format(e)
    except BadRequest as e:
        return json.dumps({"Error": "Bad Request <{}>".format(e)})
    if not src:
        return json.dumps(
            ["Error: Unknown API endpoint <{}>".format(path),
             Doc.urls()])
    #logger.info(pformat(request.values))
    rv = src.query(request.values)
    #logger.info(pformat(rv))
    return json.dumps(rv,
                      indent=Config.get("JSON_PRETTY"),
                      default=custom_json_encoder)
Example no. 23
 def test_csvInput(self):
     """ load csv_test.py and check the results """
     module = Config.load_plugin('test/csv_test.py')
     doc = Doc.from_url("csv_test")
     result = doc.query( { 'hash' : '41e25e514d90e9c8bc570484dbaff62b' } )
     self.assertEqual( result, {'name':'cmd.exe',
                                'hash':'41e25e514d90e9c8bc570484dbaff62b',
                                'from_csv_input': True,
                                'date.created': datetime(2018,2,20,11,23,0),
                                'nonce' : 'ca79d9cbb8c73cbe610dfa05030a2183'} )
Example no. 24
 def index_writer(self, file_path):
     for dirpath, dirnames, filenames in os.walk(file_path):
         for i in filenames:
             with open(file_path+"\\"+i, 'r', encoding='utf-8') as f:
                 key, title, context = f.read().split('\t\t')
                 # read the file and store, keyed by the keyword, the keyword, the page title, and the content
                 doc = Doc()
                 doc.add('key', key)
                 doc.add('title', title)
                 doc.add('context', context)
                 self.doc_list.append(doc)
     self.index()
Example no. 25
    def find_one(self, **kwargs):
        """Find one single document. Mainly this is used to retrieve
        documents by unique key.
        """

        docs = self._db[self._name].find_one(self._parse_query(kwargs))

        if docs is None:
            return None

        return Doc(self, docs.to_dict())
Example no. 26
 def index_by_file(self, filepath):
     doclist = []
     for line in open(filepath, 'r').readlines():
         cols = line.rstrip().split('\t')
         if len(cols) == 3:
             id = cols[0]
             name = cols[1]
             text = cols[2]
             doc = Doc(id, name, text)
             doclist.append(doc)
     self.index(doclist)
Example no. 27
def build_html_doc(template_fname, verbosity, archetype = None):
    """
    Archetype required only when building archetype docs.

    """
    # archetypes all use the same template.. but we don't want to 
    # put them in the same doc file.
    if archetype is not None:
        doc_fname = archetype.get_id()
    else:
        doc_fname = template_fname

    # base name .. no extension
    doc_base_fname, _ = splitext(basename(doc_fname))
    xml_fname = join(build_dir, "%s.xml" % doc_base_fname)
    html_fname = join(build_dir, "%s.html" % doc_base_fname)

    # parse an xml document
    doc = Doc(xml_fname)        
    if not doc.parse():
        print("Problem parsing the xml.")
        exit(0)

    if not doc.validate() and fail_fast:
        print("Fatal: xml errors are fatal!")
        print("Run with the -s cmd line option to ignore xml errors.")
        exit(0)
    
    
    # build the html document by converting the xml into tex
    with codecs.open(html_fname, "w", "utf-8") as f:
        html_formatter = HtmlFormatter(f)
        errors = doc.format(html_formatter)
        if len(errors) > 0:
            print("Errors:")
            for error in errors:
                print("\t%s\n\n\n" % error)
                
            if fail_fast:
                sys.exit()
    return
Example no. 28
    def create_document(self, req):
        """ Creates document class """

        args = []

        print('self.get_template:', self.get_template(req))

        args = [self.get_template(req), self.env, self, req]

        document = Doc(args)

        return document
Example no. 29
    def test_addSomeDocs(self):
        t = self.db["mytable"]
        self.assertEqual(t.count(), 1, "still 1 document")

        countSB = 1
        for foo in [5, 4, 6]:
            for bar in ['Cedric', 'Alice', 'Bob']:
                d = Doc(foo=foo, bar=bar)
                t.addDoc(d)
                countSB += 1
                self.assertEqual(t.count(), countSB,
                                 "count of documents in mytable")
Example no. 30
    def get(self, docKey):
        keyContents = self.cache.get(docKey)
        if not keyContents:
            k = Key(self.__getBucket())
            k.key = docKey
            keyContents = k.get_contents_as_string()
            self.cache.set(docKey, keyContents, self.__cacheExpiry)

        storedTags = json.loads(keyContents)
        content = storedTags.pop('content', None)
        tags = storedTags

        return Doc(docKey, content, tags)
Example no. 31
def get_file_docmap(filename):
    with open(filename) as f:
        xml = etree.fromstring("<root>" + f.read() + "</root>")

    docs = {}
    for node in xml.iter("DOC"):
        d = Doc.from_xml_node(node)

        if d.num in docs:
            print("Clash d.num in docs, %d" % d.num)
            sys.exit(1)

        docs[d.num] = d

    return docs
Example no. 32
    def find_one(self, **kwargs):
        """Find one single document. Mainly this is used to retrieve
        documents by unique key.
        """

        if '_id' in kwargs:
            args = pymongo.objectid.ObjectId(str(kwargs['_id']))
        else:
            args = self._parse_query(kwargs)

        docs = self._db[self._name].find_one(args)

        if docs is None:
            return None

        return Doc(self, docs)
Example no. 33
def sample(population):
    if random.random() < EXPLORE_PROB:
        # Explore
        doc_id = random.choice(population)
        return (doc_id, EXPLORE)
    else:
        # Exploit
        jsons = [getJSON(x) for x in population]
        docs = []
        for j in jsons:
            try:
                docs.append(Doc(j))
            except DocNotSuitable:
                continue
        y_hats = [(x.id, predict(x)) for x in docs]
        highscore = max([y for x, y in y_hats])
        results = [x for x, y in y_hats if y == highscore]
        return (results[0], EXPLOIT)
Example no. 34
 def run(self):
     main_html = self.down_html(self.main_page)
     main_tree = HtmlTree(main_html)
     if not url_filter[self.main_page]:
         url_filter.urls[self.main_page] = {}
         url_filter.urls[self.main_page]['cr'] = long(time.time() * 1000)
     url_filter.urls[self.main_page]['fr'] = long(time.time() * 1000)
     for url in self.extract_url(main_tree.tree, url_rule='//h1//a'):
         if not url_filter[url]:
             url_filter.urls[url] = {}
             url_filter.urls[url]['cr'] = long(time.time() * 1000)
             html = self.down_html(url)
             child_tree = HtmlTree(main_html)
             doc = Doc(self.get_title(html),
                       self.make_content(self.extract.get_text(html)), 'li',
                       'computer', self.tag_filter(self.get_keywords(html)))
             save_doc(doc, doc.file_name)
         else:
             url_filter.urls[url]['fr'] = long(time.time() * 1000)
Example no. 35
    def load_docs(self, docs_pt):
        print("load docs: " + docs_pt)
        rf = open(docs_pt)
        if not rf:
            print("file not found: " + docs_pt)

        for line in rf.readlines():
            d = Doc(line)
            biterms = []
            d.gen_biterms(biterms)
            # statistic the empirical word distribution
            for i in range(d.size()):
                w = d.get_w(i)
                self.pw_b[w] += 1
            for b in biterms:
                self.bs.append(b)

        self.pw_b.normalize()
Example no. 36
 def delete(self):
     
     # also delete all child docs related inside this one
     # that are configured with cascade=delete
     # the cascade feature does not support many-to-many relations
     self._call_relation_attr('_delete_cascade')
     
     # update relation list metadata
     rels = filter( lambda x: type( getattr( self.__class__, x ) ) == relation, dir(self.__class__) )
     for rel in rels:
         
         vrela = getattr( self.__class__, rel )
         
         if vrela._type != 'many-to-many':
            break
         
         keyrel = getattr( vrela, '_keyrel' )
         
         if getattr(self, keyrel[0]) is None:
             break
         
         backref = getattr( vrela, '_backref' )
         
         rela = getattr( vrela, '_get_rel_class' )()
         mykey = getattr(self.__dict__['_data'],backref[1])
         
         all_rela_obj = self._monga._db[rela._collection_name].find({ keyrel[0]: mykey })
         
         col_save = self._monga._db[rela._collection_name].save
         
         for rela_obj in all_rela_obj:
             
             rela_obj[keyrel[0]].remove(mykey)
             col_save(rela_obj)
             
     
     return Doc.delete(self)
Example no. 37
    def correct(self, txt):
        """
		given a string, identify the least-common n-gram not present in 'skip'
		and return a list of suggested replacements
		"""
        d = Doc(txt, self.w)
        changes = list(self.suggest(d, 1))
        for ch in changes:
            logger.debug('ch=%s' % (ch, ))
            change = [ch[0].ngd]
            logger.debug('change=%s' % (change, ))
            d.applyChanges(change)
            logger.debug('change=%s after applyChanges d=%s' % (change, d))
            d = Doc(d, self.w)
            break  # FIXME: loops forever
            changes = list(self.suggest(d, 1))
        res = str(d).decode('utf8')
        logger.debug('correct res=%s %s' % (type(res), res))
        return res
Example no. 38
    def load_docs(self, docs_pt):
        '''
        @description: 
        @param docs_pt:
        @return: 
        '''
        print("load docs: " + docs_pt)
        rf = open(docs_pt)
        if not rf:
            print("file not found: " + docs_pt)

        for line in rf.readlines():
            d = Doc(line)
            biterms = []  # word pairs (biterms) that the words of this line can form
            d.gen_biterms(biterms)
            # statistic the empirical word distribution
            for i in range(d.size()):
                w = d.get_w(i)
                self.pw_b[w] += 1  # this line counts word frequencies
            for b in biterms:
                self.bs.append(b)  # self.bs collects biterm objects: every possible word pair from this text
        self.pw_b.normalize()  # normalize: pw_b now maps each word to its relative frequency
Example no. 39
class XMLToCs:
	def __init__(self, filepath):
		self.filepath = filepath;
		self.datatables = []
		self.doc = Doc()
		
	def transfer(self):
		doc = ElementTree.parse(self.filepath)
		root = doc.getroot()
		database = root.getchildren()
		for table in database:
			datatable = Table(table.attrib['tablename'])
			columns = table.getchildren()
			for column in columns:
				datatable.add(column.attrib['columnname'], column.attrib['columntype'],column.attrib['columnwidth'])
			self.datatables.append(datatable)
	
	def showall(self):
		for t in self.datatables:
			t.show()
			
	def initialdoc(self):
		self.doc.clear()
		
	def buildmodel(self):
		for model in self.datatables:
			self.initialdoc()
			self.doc.append(Linefac.genlineobj('using System;'))
			self.doc.append(Linefac.genlineobj('using System.Collections.Generic;'))
			self.doc.append(Linefac.genlineobj('using System.Linq;'))
			self.doc.append(Linefac.genlineobj('using System.Web;'))
			self.doc.append(Linefac.genlineobj(''))
			self.doc.append(Linefac.genlineobj('namespace Model'))
			self.doc.append(Linefac.genlineobj('{'))
			self.doc.append(Linefac.genlineobj('public class ' + model.tablenamecap))
			self.doc.append(Linefac.genlineobj('{'))
		
			for field in model.columns:
				self.doc.append(Linefac.genlineobj('private ' + field['columnnettype'] + ' ' + field['columnname'] + ';'))
		
			self.doc.append(Linefac.genlineobj(''))
			for field in model.columns:
				self.doc.append(Linefac.genlineobj('public ' + field['columnnettype'] + ' ' + field['columnnamecap']))
				self.doc.append(Linefac.genlineobj('{'))
				self.doc.append(Linefac.genlineobj('get { return ' + field['columnname'] + '; }'))
				self.doc.append(Linefac.genlineobj('set { ' + field['columnname'] + ' = value; }'))
				self.doc.append(Linefac.genlineobj('}'))
			self.doc.append(Linefac.genlineobj('}'))
			self.doc.append(Linefac.genlineobj('}'))
		
			self.writemodel(model)
		
	def writemodel(self, model):
		if (not os.path.exists('Model')) or (os.path.isfile('Model')):
			os.makedirs('Model')
		f = file("Model\\" + model.tablenamecap + '.cs', 'w')
		self.doc.writeall(f)
		print 'Model:', model.tablenamecap, 'is Done!'
		
	def buildbll(self):
		for model in self.datatables:
			self.initialdoc()
			self.doc.append(Linefac.genlineobj('using System;'))
			self.doc.append(Linefac.genlineobj('using System.Collections.Generic;'))
			self.doc.append(Linefac.genlineobj('using System.Linq;'))
			self.doc.append(Linefac.genlineobj('using System.Web;'))
			self.doc.append(Linefac.genlineobj('using System.Data;'))
			self.doc.append(Linefac.genlineobj('using System.Data.SqlClient;'))
			self.doc.append(Linefac.genlineobj('using DBUnity;'))
			self.doc.append(Linefac.genlineobj('using Model;'))
			self.doc.append(Linefac.genlineobj('using System.Text;'))
			self.doc.append(Linefac.genlineobj(''))
			self.doc.append(Linefac.genlineobj('namespace BLL'))
			self.doc.append(Linefac.genlineobj('{'))
			self.doc.append(Linefac.genlineobj('public class ' + model.tablenamecap + 'BLL'))
			self.doc.append(Linefac.genlineobj('{'))
			self.doc.append(Linefac.genlineobj('public ' + model.tablenamecap + 'BLL(){}'))
			self.doc.append(Linefac.genlineobj(''))
			self.doc.append(Linefac.genlineobj('static readonly SQLDatabase db = new SQLDatabase();'))
			self.doc.append(Linefac.genlineobj(''))
			self.buildinsert(model)
			self.buildselect(model)
			self.buildupdate(model)
			self.builddelete(model)
			self.doc.append(Linefac.genlineobj('}'))
			self.doc.append(Linefac.genlineobj('}'))
			
			self.writebll(model)
		
	def writebll(self, model):
		if (not os.path.exists('BLL')) or (os.path.isfile('BLL')):
			os.makedirs('BLL')
		f = file("BLL\\" + model.tablenamecap + 'BLL.cs', 'w')
		try:
			self.doc.writeall(f)
			print 'BLL:', model.tablenamecap, 'BLL is Done!'
		except:
			print 'BLL:', model.tablenamecap, 'BLL error while write!'
		finally:
			f.close()
	
	def buildinsert(self, model):
		self.doc.append(Linefac.genlineobj('public static bool Insert(List<' + model.tablenamecap + '> models, ref string e)'))
		self.doc.append(Linefac.genlineobj('{'))
		self.doc.append(Linefac.genlineobj('int count = models.Count;'))
		self.doc.append(Linefac.genlineobj('for(int i = 0; i < count; i++)'))
		self.doc.append(Linefac.genlineobj('{'))
		
		strsql = 'insert into sb_' + model.tablenamecap + '('
		strsql += ','.join(model.columnnamelist) + ')'
		strsql += 'values(@'
		strsql += ",@".join(model.columnnamelist) + ')'		
		self.doc.append(Linefac.genlineobj('string sql = \"' + strsql + '\";'))
		
		self.doc.append(Linefac.genlineobj('SqlParameter[] parameters = '))
		self.doc.append(Linefac.genlineobj('{'))
		
		templist = []
		for col in model.columns:			
			templist.append('new SqlParameter(\"@' + col['columnname'] + '\", SqlDbType.' + col['columnsqldbtype'] + ', ' + col['columnsqldbwidth'] + ')')
		tempstr = ', '.join(templist)
		self.doc.append(Linefac.genlineobj(tempstr))
		self.doc.append(Linefac.genlineobj('};'))
		
		tempcount = 0
		for col in model.columns:
			self.doc.append(Linefac.genlineobj('parameters[' + str(tempcount) + '].Value = models[i].' + col['columnnamecap'] + ';'))
			tempcount = tempcount + 1
		self.doc.append(Linefac.genlineobj(''))
		
		self.doc.append(Linefac.genlineobj('e += db.InsertExec(sql, parameters);'))
		self.doc.append(Linefac.genlineobj('if(e != "")'))
		self.doc.append(Linefac.genlineobj('{'))
		self.doc.append(Linefac.genlineobj('e += \"Error in insert!\";'))
		self.doc.append(Linefac.genlineobj('return false;'))
		self.doc.append(Linefac.genlineobj('}'))
		self.doc.append(Linefac.genlineobj('return true;'))
		self.doc.append(Linefac.genlineobj('}'))
		self.doc.append(Linefac.genlineobj('}'))
		
	def buildselect(self, model):
		self.doc.append(Linefac.genlineobj('public static bool Select(List<' + model.tablenamecap +'> models, string sql, ref string e)'))
		self.doc.append(Linefac.genlineobj('{'))
		self.doc.append(Linefac.genlineobj('DataTable table = new DataTable();'))
		self.doc.append(Linefac.genlineobj('table = db.QueryDataTable(sql, ref e);'))
		self.doc.append(Linefac.genlineobj('if (e == "")'))
		self.doc.append(Linefac.genlineobj('{'))
		self.doc.append(Linefac.genlineobj('for (int i = 0; i < table.Rows.Count; i++)'))
		self.doc.append(Linefac.genlineobj('{'))
		self.doc.append(Linefac.genlineobj(model.tablenamecap + ' model = new ' + model.tablenamecap + '();'))
		
		for col in model.columns:
			self.doc.append(Linefac.genlineobj('model.' + col['columnnamecap'] + ' = (' + col['columnnettype'] + ')table.Rows[i][\"' + col['columnname'] + '\"];'))
		self.doc.append(Linefac.genlineobj('models.Add(model);'))
		
		self.doc.append(Linefac.genlineobj('}'))
		self.doc.append(Linefac.genlineobj('return true;'))
		self.doc.append(Linefac.genlineobj('}'))
		self.doc.append(Linefac.genlineobj('else'))
		self.doc.append(Linefac.genlineobj('{'))
		self.doc.append(Linefac.genlineobj('e += \"Error in select!\";'))
		self.doc.append(Linefac.genlineobj('return false;'))
		self.doc.append(Linefac.genlineobj('}'))
		self.doc.append(Linefac.genlineobj('}'))
		
	def buildupdate(self, model):
		self.doc.append(Linefac.genlineobj('public static bool Update(' + model.tablenamecap + ' model, ref string e)'))
		self.doc.append(Linefac.genlineobj('{'))
		self.doc.append(Linefac.genlineobj('StringBuilder strSql = new StringBuilder();'))
		self.doc.append(Linefac.genlineobj('strSql.Append(\"update tb_' + model.tablenamecap + ' set \");'))
		for col in model.columns[1:]:
			self.doc.append(Linefac.genlineobj('strSql.Append(\"' + col['columnname'] + '=@' + col['columnname'] + ',\");'))
		self.doc.append(Linefac.genlineobj('strSql.Append(\" where ' + model.columns[0]['columnname'] + '=@' + model.columns[0]['columnname'] + ' \");'))
		
		self.doc.append(Linefac.genlineobj('SqlParameter[] parameters = '))
		self.doc.append(Linefac.genlineobj('{'))
		
		templist = []
		for col in model.columns:			
			templist.append('new SqlParameter(\"@' + col['columnname'] + '\", SqlDbType.' + col['columnsqldbtype'] + ', ' + col['columnsqldbwidth'] + ')')
		tempstr = ', '.join(templist)
		self.doc.append(Linefac.genlineobj(tempstr))
		self.doc.append(Linefac.genlineobj('};'))
		
		tempcount = 0
		for col in model.columns:
			self.doc.append(Linefac.genlineobj('parameters[' + str(tempcount) + '].Value = model.' + col['columnnamecap'] + ';'))
			tempcount = tempcount + 1
		self.doc.append(Linefac.genlineobj(''))
		
		self.doc.append(Linefac.genlineobj('e = db.QueryExec(strSql.ToString(), parameters);'))
		self.doc.append(Linefac.genlineobj('if(e != "")'))
		self.doc.append(Linefac.genlineobj('{'))		
		self.doc.append(Linefac.genlineobj('e += \"Error in update!\";'))
		self.doc.append(Linefac.genlineobj('return false;'))
		self.doc.append(Linefac.genlineobj('}'))
		self.doc.append(Linefac.genlineobj('return true;'))
		self.doc.append(Linefac.genlineobj('}'))
		
	def builddelete(self, model):		
		self.doc.append(Linefac.genlineobj('public static bool delete(' + model.columns[0]['columnnettype'] + " key, ref string e)"))
		self.doc.append(Linefac.genlineobj('{'))
		self.doc.append(Linefac.genlineobj('StringBuilder strSql = new StringBuilder();'))
		self.doc.append(Linefac.genlineobj('strSql.Append(\"delete from tb_' + model.tablenamecap + ' \");'))
		self.doc.append(Linefac.genlineobj('strSql.Append(\" where ' + model.columns[0]['columnname'] + '=@' + model.columns[0]['columnname'] + ' \");'))
		self.doc.append(Linefac.genlineobj('SqlParameter[] parameters = { new SqlParameter(\"@' + model.columns[0]['columnname'] + '\", SqlDbType.' + model.columns[0]['columnsqldbtype'] + ',' + model.columns[0]['columnsqldbwidth'] + ')};'))
		self.doc.append(Linefac.genlineobj('parameters[0].Value = key;'))
		self.doc.append(Linefac.genlineobj('e = db.QueryExec(strSql.ToString(), parameters);'))
		self.doc.append(Linefac.genlineobj('if(e != "")'))
		self.doc.append(Linefac.genlineobj('{'))		
		self.doc.append(Linefac.genlineobj('e += \"Error in delete!\";'))
		self.doc.append(Linefac.genlineobj('return false;'))
		self.doc.append(Linefac.genlineobj('}'))
		self.doc.append(Linefac.genlineobj('return true;'))
		self.doc.append(Linefac.genlineobj('}'))
Example no. 40
	def __init__(self, filepath):
		self.filepath = filepath;
		self.datatables = []
		self.doc = Doc()
Example no. 41
#!c:\python27\python.exe
#filename:main.py

from xmlreader import XmlReader
from doc import Doc
from linefac import Linefac
import os

doc = Doc()
reader = XmlReader()
classlist = reader.read()
for c in classlist:
	doc.clear()
	doc.append(Linefac.genlineobj('using System;'))
	doc.append(Linefac.genlineobj('using System.Collections.Generic;'))
	doc.append(Linefac.genlineobj('using System.Linq;'))
	doc.append(Linefac.genlineobj('using System.Web;'))
	doc.append(Linefac.genlineobj(''))
	
	namespace = raw_input('Please enter namespace:')
	doc.append(Linefac.genlineobj('namespace ' + namespace))
	doc.append(Linefac.genlineobj('{'))
	doc.append(Linefac.genlineobj('public class ' + c.name))
	doc.append(Linefac.genlineobj('{'))
	doc.merge(c.genclass())
	
	doc.append(Linefac.genlineobj('}'))
	doc.append(Linefac.genlineobj('}'))
	
	if (not os.path.exists('Result')) or (os.path.isfile('Result')):
		os.makedirs('Result')
Example no. 42
def param_composit(jid, seq):
    ids = pr_seq_full(jid, seq[1])
    ids.reverse()
    return Doc.fuse(*[seq[0][i].p for i in ids])
Example no. 43
	def run(self, parent, doc, para):
		parent.doc = Doc()
		parent.doc.para = {'w':para['w'], 'h':para['h']}
		parent.canvas.Zoom(1/parent.canvas.Scale, 
			(para['w']//2, -para['h']//2))
Example no. 44
 def test_satisfies(self):
     d = Doc(aaa=3, bbb=6, c='cat')
     r = d.satisfies(None)
     self.assertTrue(r, "query None -> always True") 
     r = d.satisfies({})
     self.assertTrue(r, "query {} -> always True") 
     
     r = d.satisfies({'aaa':3})
     self.assertTrue(r, "{'aaa':3} -> True") 
     r = d.satisfies({'aaa':66})
     self.assertFalse(r, "{'aaa':66} -> False") 
     
     r = d.satisfies({'c':'cat'})
     self.assertTrue(r, "{'c':'cat'} -> True") 
     r = d.satisfies({'c':'owl'})
     self.assertFalse(r, "{'c':'owl'} -> False") 
     
     # multiple tests must all be satisfied
     r = d.satisfies({'aaa':3, 'bbb':6, 'c':'cat'})
     self.assertTrue(r, "{'aaa':3, 'bbb':6, 'c':'cat'} -> True") 
     r = d.satisfies({'aaa':3, 'bbb':6, 'c':'llama'})
     self.assertFalse(r, "{'aaa':3, 'bbb':6, 'c':'llama'} -> False") 
     
     # tests against a non-existent field fail
     r = d.satisfies({'aaa':3, 'bbb':6, 'cc':'cat'})
     self.assertFalse(r, "{'aaa':3, 'bbb':6, 'cc':'cat'} -> False") 
     r = d.satisfies({'no':'nnn'})
     self.assertFalse(r, "{'no':'nnn'} -> False") 
Example no. 45
def prange(transform, baseparams, ranges):
    jobs = []
    baseparams = Doc(baseparams)
    for pd in pspace(ranges):
        jobs.append(Job(transform, baseparams.fuse(pd)))
    return jobs
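
A hypothetical call (assuming pspace(ranges) yields one parameter Doc per combination of the given ranges; 'blur' and 'radius' are made-up names):

jobs = prange('blur', {'mode': 'fast'}, {'radius': [1, 2, 3]})
# one Job per expanded point, each with params fused from the base Doc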
Example no. 46
	def suggest(self, txt, max_suggest=1, skip=[]):
		"""
		given a string, run suggest() and apply the first suggestion
		"""
		logger.debug('Chick.suggest(txt=%s max_suggest=%s, skip=%s)' % (txt, max_suggest, skip))

		d = Doc(txt, self.w)
		logger.debug('doc=%s' % d)

		"""
		locate uncommon n-gram sequences which may indicate grammatical errors
		see if we can determine better replacements for them given their context
		"""

		# order n-grams by unpopularity
		ngsize = min(3, d.totalTokens())
		logger.debug('ngsize=%s d.totalTokens()=%s' % (ngsize, d.totalTokens()))
		logger.debug('ngram(1) freq=%s' % list(d.ngramfreqctx(self.g,1)))

		# locate the least-common ngrams
		# TODO: in some cases an ngram is unpopular, but overlapping ngrams on either side
		# are relatively popular.
		# is this useful in differentiating between uncommon but valid phrases from invalid ones?
		"""
sugg       did the future 156
sugg            the future would 3162
sugg                future would undoubtedly 0
sugg                       would undoubtedly be 3111
sugg                             undoubtedly be changed 0
		"""

		least_common = sort1(d.ngramfreqctx(self.g, ngsize))
		logger.debug('least_common=%s' % least_common[:20])
		# remove any ngrams present in 'skip'
		least_common = list(dropwhile(lambda x: x[0] in skip, least_common))
		# filter ngrams containing numeric tokens or periods, they generate too many poor suggestions
		least_common = list(filter(
					lambda ng: not any(re.match('^(?:\d+|\.)$', n[0][0], re.U)
							for n in ng[0]),
					least_common))

		# FIXME: limit to reduce work
		least_common = least_common[:max(20, len(least_common)//2)]

		# gather all suggestions for all least_common ngrams
		suggestions = []
		for target_ngram,target_freq in least_common:
			suggs = self.ngram_suggest(target_ngram, target_freq, d, max_suggest)
			if suggs:
				suggestions.append(suggs)

		if not suggestions:
			"""
			"""
			ut = list(d.unknownToks())
			logger.debug('unknownToks=%s' % ut)
			utChanges = [(u, (self.w.correct(u[0]), u[1], u[2], u[3])) for u in ut]
			logger.debug('utChanges=%s' % utChanges)
			utChanges2 = list(filter(lambda x: x not in skip, utChanges))
			for old,new in utChanges2:
				td = TokenDiff([old], [new], damerau_levenshtein(old[0], new[0]))
				ngd = NGramDiff([], td, [], self.g)
				ngds = NGramDiffScore(ngd, None, 1)
				suggestions.append([ngds])

		logger.debug('------------')
		logger.debug('suggestions=%s' % (suggestions,))
		suggs = filter(lambda x:x and x[0].ngd.newfreq != x[0].ngd.oldfreq, suggestions)
		logger.debug('suggs=%s' % (suggs,))
		# sort suggestions by their score, highest first
		bestsuggs = rsort(suggs, key=lambda x: x[0].score)
		# by total new frequency...
		bestsuggs = rsort(bestsuggs, key=lambda x: x[0].ngd.newfreq)
		# then by improvement pct. for infinite improvements this results in
		# the most frequent recommendation coming to the top
		bestsuggs = rsort(bestsuggs, key=lambda x: x[0].improve_pct())

		# finally, allow frequency to overcome small differences in score, but only
		# for scores that are within 1 to begin with.
		# if we account for frequency too much the common language idioms always crush
		# valid but less common phrases; if we don't account for frequency at all we often
		# recommend very similar but uncommon and weird phrases. this attempts to strike a balance.
		"""
		bestsuggs.sort(lambda x,y:
			x[0].score - y[0].score if abs(x[0].score - y[0].score) > 1 \
			else \
				(y[0].score + int(log(y[0].ngd.newfreq))) - \
				(x[0].score + int(log(x[0].ngd.newfreq))))
		"""

		for bs in bestsuggs:
			for bss in bs:
				logger.debug('bestsugg %6.2f %2u %2u %7u %6.0f%% %s' % \
					(bss.score, bss.ediff, bss.ngd.diff.damlev,
					 bss.ngd.newfreq, bss.improve_pct(), ' '.join(bss.ngd.newtoks())))

		for bs in bestsuggs:
			logger.debug('> bs=%s' % (bs,))
			yield bs
Example no. 47
    def POST(self):
        start_time = time()
        text = unicode(web.input().get("text", ""))
        lines = text.split("\r\n")

        act = web.input().get("act", "")
        if act == "Replace":
            # FIXME: if replacement takes place, update location/offsets
            # of all remaining session['suggestions']
            replacement_index = int(web.input().get("replacement_index", "0"))
            if replacement_index:
                d = Doc(lines, chick.w)
                replacements = session.get("replacements")
                if replacement_index <= len(replacements):
                    replacement = replacements[replacement_index - 1]
                    d.applyChanges([replacement])
                    text = str(d)
                    lines = d.lines
                    logger.debug("after replacement lines=%s" % (lines,))
                session["suggestions"].pop(0)
        elif act == "Skip to next...":
            session["skip"].append(session["target"])
            session["suggestions"].pop(0)
        elif act == "Done":
            # nuke target, replacements, skip, etc.
            session.kill()

        sugg2 = []
        suggs = []
        suggestions = []
        replacements = []

        if act and act != "Done":
            suggestions = session["suggestions"]
            if not suggestions:
                logger.debug("suggest(lines=%s)" % (lines,))
                suggestions = list(chick.suggest(lines, 5, session["skip"]))
            if not suggestions:
                target, suggs, sugg2 = None, [], []
            else:
                # calculate offsets based on line length so we can highlight target substring in <textarea>
                off = [len(l) + 1 for l in lines]
                lineoff = [0] + [sum(off[:i]) for i in range(1, len(off) + 1)]
                changes = suggestions[0]
                target = changes[0].ngd.oldtoks()
                for ch in changes:
                    ngd = ch.ngd
                    replacements.append(ngd)
                    o = ngd.old()
                    r = ngd.new()
                    linestart = o[0][1]
                    lineend = o[-1][1]
                    start = o[0][3]
                    end = o[-1][3] + len(o[-1][0])
                    sugg2.append((" ".join(ngd.newtoks()), lineoff[linestart] + start, lineoff[lineend] + end))
            session["target"] = target
            session["replacements"] = replacements
            session["suggestions"] = suggestions

        elapsed = round(time() - start_time, 2)
        return render.check(text, sugg2, lines, elapsed, suggestions)
Example no. 48
    def __setattr__(self, k, v):
        
        # raise if the key is one of the restricted attribute names
        if k in restrict_attribute_names:
            raise SuperDocError, "`%s` cannot use the restricted keyword `%s`. Please choose another name." % (self.__class__.__name__,k)
        
        if k in superdoc_reserved_words or k.startswith('_x_'):
            return Doc.__setattr__(self, k , v)
        
        if self.__dict__.has_key('_opt') and self._opt.get('strict') == True:
            if self.__has_entryname( k ) == False:
                raise SuperDocError, "`%s` is a strict model. Cannot assign entry name `%s`" % ( self.__class__.__name__,k )
        
        if hasattr( self.__class__, '_x_%s' % k ):
            typedata = getattr( self.__class__, '_x_%s' % k )
            vt = type(v)
            if typedata != vt and vt is not types.NoneType:
                
                if typedata is bool and v not in (1,0):
                    raise SuperDocError, "mismatch data type `%s`=%s and `%s`=%s" % (k,typedata,v,type(v))
                
                if type(typedata) != variant:
                    # try to convert it if possible
                    try:
                        v = typedata(v)
                    except:
                        raise SuperDocError, "mismatch data type `%s`=%s and `%s`=%s" % (k,typedata,v,type(v))
                        
            
        # check if one-to-one relation
        # just map it to pk==fk
        if hasattr(self.__class__,k) and type( getattr(self.__class__,k) ) == relation and isinstance(v,(SuperDoc,relation)):
            
            if type(v) == relation:
                Doc.__setattr__(self, k , v)
                if v._type != 'one-to-one' or v._data is None:
                    return
            
            r = getattr(self.__class__, k)
            
            if r._type == "one-to-one":
                
                if r._pk[0] == '_id':
                    if not hasattr(v,'_id') or v._id == None:
                        if self._monga is None:
                            raise RelationError, "cannot auto-save one-to-one relation in smart object assignment. Is the object not bound to a monga instance?"
                        # may unsaved doc, save it first
                        v.set_monga(self._monga)
                        v.save()

                elif not v._hasattr(r._pk[0]):
                    raise RelationError, "relation model `%s` does not have keyname `%s`" % (v.__class__.__name__, r._pk[0])
                        
                    
                fkey = getattr( v, r._pk[0] )
                if fkey is not None:
                    setattr( self.__dict__['_data'], r._pk[1], type(fkey) == ObjectId and str(fkey) or fkey )
                else:
                    # reversed relation, so just push it onto the pending ops...
                    fkey = getattr( self.__dict__['_data'], r._pk[1] )
                    self._pending_ops.add_op( v, 'setattr', key=r._pk[0], value=type(fkey) == ObjectId and str(fkey) or fkey )
                    self._pending_ops.add_op( v, 'save' )
                    
            elif r._type == "many-to-one":
                setattr( self.__dict__['_data'], '__meta_pcname__', v.__class__.__name__ )
                setattr( self.__dict__['_data'], r._pk, unicode(v._id) )
                
        else:
            Doc.__setattr__(self, k , v)