Example #1
  def SetUpStatsForTesting(self, pkgstat_module):
    for md5_sum, data in pkgstat_module.pkgstats[0]['elfdump_info'].iteritems():
      json = cjson.encode(data)
      content_hash = hashlib.md5()
      content_hash.update(json)
      models.ElfdumpInfoBlob(
          md5_sum=md5_sum,
          json=json,
          content_md5_sum=content_hash.hexdigest(),
          mime_type='application/json')
    data = copy.deepcopy(pkgstat_module.pkgstats[0])
    data['elf_callback'] = None
    json = cjson.encode(data)
    content_hash = hashlib.md5()
    content_hash.update(json)
    md5_sum = pkgstat_module.pkgstats[0]['basic_stats']['md5_sum']
    models.Srv4FileStatsBlob(
        md5_sum=md5_sum,
        json=json,
        content_md5_sum=content_hash.hexdigest(),
        mime_type='application/json')

    sqo_pkgstats, pkgstats = relational_util.StatsStructToDatabaseLevelOne(
        md5_sum, False)
    return sqo_pkgstats, pkgstats
Example #2
def save(filename, data):
    """Accepts filenamestring and a list of objects, probably dictionaries.
        Writes these to a file with each object pickled using json on each line.
    """
    if isinstance(data, dict):
        # save dict values recursively in separate files
        filename,ext = (filename.rsplit('.',1) if '.' in filename else (filename,''))

        for k,v in data.iteritems():
            save('{0}-{1}.{2}'.format(filename, k, ext), v)
    else:
        if isinstance(data, numpy.ndarray):
            # save numpy arrays with numpy's own writer for speed;
            # the .gz suffix makes savetxt compress automatically
            numpy.savetxt(filename + '.npy.gz', data)
        elif isinstance(data, list) and data and isinstance(data[0], numpy.ndarray):
            # is a big list of arrays, so save in one big file
            numpy.savez(filename + '.npy.list', *data)
        else:
            with open(filename, 'w') as f:
                if hasattr(data, '__iter__'):
                    for i,d in enumerate(data):
                        if i != 0:
                            f.write('\n')
                        f.write(cjson.encode(d))
                else:
                    f.write(cjson.encode(data))
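A minimal counterpart for reading the one-object-per-line files back might look like this (a sketch; load_lines is a hypothetical helper covering only the plain-text branch, not the numpy or per-key dict files):

import cjson

def load_lines(filename):
    # Read a file written by save(): one cjson-encoded object per line.
    with open(filename, 'r') as f:
        return [cjson.decode(line) for line in f if line.strip()]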
Example #3
 def got_command(self, dispatcher, command, headers):
     if command == "summary":
         summary = {'ip'            : RelayConfig.relay_ip,
                    'version'       : __version__,
                    'status'        : self.status,
                    'uptime'        : int(time() - self.start_time),
                    'session_count' : len(self.session_manager.sessions),
                    'stream_count'  : self.session_manager.stream_count,
                    'bps_relayed'   : self.session_manager.bps_relayed}
         return cjson.encode(summary)
     elif command == "sessions":
         return cjson.encode(self.session_manager.statistics)
     elif command == "update":
         if self.graceful_shutdown or self.shutting_down:
             if not self.session_manager.has_session(**headers):
                 log.debug("cannot add new session: media-relay is shutting down")
                 return 'halting'
         try:
             local_media = self.session_manager.update_session(dispatcher, **headers)
         except RelayPortsExhaustedError:
             log.error("Could not reserve relay ports for session, all allocated ports are being used")
             return "error"
         if local_media:
             return " ".join([RelayConfig.advertised_ip or local_media[0][0]] + [str(media[1]) for media in local_media])
     else: # remove
         session = self.session_manager.remove_session(**headers)
         if session is None:
             return "error"
         else:
             return cjson.encode(session.statistics)
Example #4
    def _marshaled_dispatch(self, data, dispatch_method = None):
        id = None
        try:
            req = cjson.decode(data)
            method = req['method']
            params = req['params']
            id     = req['id']

            if dispatch_method is not None:
                result = dispatch_method(method, params)
            else:
                result = self._dispatch(method, params)
            response = dict(id=id, result=result, error=None)
        except:
            extpe, exv, extrc = sys.exc_info()
            err = dict(type=str(extpe),
                       message=str(exv),
                       traceback=''.join(traceback.format_tb(extrc)))
            response = dict(id=id, result=None, error=err)
        try:
            return cjson.encode(response)
        except:
            extpe, exv, extrc = sys.exc_info()
            err = dict(type=str(extpe),
                       message=str(exv),
                       traceback=''.join(traceback.format_tb(extrc)))
            response = dict(id=id, result=None, error=err)
            return cjson.encode(response)
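For reference, a round trip through this dispatcher looks roughly like the following (a sketch; the echo method is hypothetical and depends on what the server actually registers):

import cjson

request = cjson.encode({'method': 'echo', 'params': ['hello'], 'id': 1})
# _marshaled_dispatch(request) would then produce a string such as:
#   {"id": 1, "result": ["hello"], "error": null}
response = cjson.decode('{"id": 1, "result": ["hello"], "error": null}')
assert response['error'] is None and response['id'] == 1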
Example #5
File: graph.py  Project: elishacook/groof
 def save(self):
     self.storage.start_txn()
     try:
         for k in self._local.removed_edges:
             try:
                 del self.storage.left[k]
                 del self.storage.left[invert_edge_key(k)]
             except KeyError:
                 pass
         for node_id in self._local.removed_nodes:
             node_key = pack_node_key(node_id)
             if node_key in self.storage.node:
                 for k in self.storage.left.match_prefix(node_key):
                     del self.storage.left[k]
                 for k in self.storage.right.match_prefix(node_key):
                     del self.storage.right[k]
                 del self.storage.node[node_key]
         for n in self._local.dirty_nodes:
             self.storage.node[pack_node_key(n.id)] = cjson.encode(n._attrs)
         for e in self._local.dirty_edges:
             k = pack_edge_key(e.left_id, e.rel, e.right_id)
             self.storage.left[k] = cjson.encode(e._attrs)
             self.storage.right[invert_edge_key(k)] = ''
         self._reset_change_buffers()
         num_new_nodes = self.next_node_id - self.last_node_id
         if num_new_nodes > 0:
             self.storage.node[struct.pack('i', 0)] = struct.pack('Q', self.next_node_id)
             self.last_node_id = self.next_node_id
         self.storage.commit_txn()
     except:
         self.storage.abort_txn()
         raise
Example #6
def _AggregateResponse(names_param, blocks, accept):
  """Retrieve Aggregate results, and add them to the response.

  Args:
    names_param - Param object containing either a List of Taba Names,
                  or a names glob to retrieve Aggregates for.
    blocks - List of Taba Name Block IDs to retrieve Aggregates for.
    accept - MIME type in which to format the output.
  """
  names = names_param.value

  if names_param.isGlob:
    all_names = []
    names = fnmatch.filter(global_taba_server.GetNames(), names_param.value)

  if names != None and len(names) == 0:
    aggregates = []

  else: # Retrieve the requested Aggregates.
    aggregates = global_taba_server.GetAggregates(names, blocks)

  # Format the response in accordance to the Accept header.
  if accept == 'text/json':
    juno.append(cjson.encode(dict([a for a in aggregates])))

  else:
    for agg_name, aggregate in aggregates:
      juno.append(agg_name).append(": ").append(cjson.encode(aggregate))
      juno.append("\n")
Example #7
def rename_ingestion(request, ing_id):
    """
    Rename ingestion (regarding permissions)
    """
    # FIXME: make a post query, not passing ing_id as parameter!
    try:
        ing = Ingestion.objects.get(id = ing_id)
    except:
        return HttpResponse(json.encode({'error': "This ingestion has not been found"}), mimetype = 'text/plain')

    name = request.POST.get('value', None)
    if not name:
        # Fails silently, echoing the current label back
        return HttpResponse(str(ing.label), mimetype = 'text/plain')

    perms = get_entity_permissions(request, target = 'ingestion', key = int(ing.id))
    if not perms['currentUser']['write']:
        return HttpResponse(json.encode({'error': "You don't have permission to delete this ingestion"}), mimetype = 'text/plain')

    orig = ing.label
    try:
        ing.label = name
        ing.save()
    except IntegrityError:
        # Name not available
        return HttpResponse(json.encode({'old': orig, 'error': "An ingestion named <b>%s</b> already exists.<br/>Cannot rename ingestion." % name}), mimetype = 'text/plain')

    return HttpResponse(str(name), mimetype = 'text/plain')
Example #8
 def indexDocs(self):
   for root, _, filenames in os.walk(self.directory):
     for filename in filenames:
       try:
         path = os.path.join(root, filename)
         print path
         f = open(path, 'r')
         # every line in the file is a tweet document to be indexed
         for line in f:
           tweet = cjson.decode(line)
           doc = Document()
           doc.add(Field("tx", tweet['tx'], Field.Store.YES,
                         Field.Index.NOT_ANALYZED))
           doc.add(Field("h", cjson.encode(tweet['h']), Field.Store.YES,
                         Field.Index.NOT_ANALYZED))
           doc.add(Field("t", cjson.encode(tweet['t']), Field.Store.YES,
                         Field.Index.NOT_ANALYZED))
           doc.add(Field("ats", cjson.encode(tweet['ats']), Field.Store.YES,
                         Field.Index.NOT_ANALYZED))
           
           doc.add(Field("w", " ".join(tweet['w']), Field.Store.NO,
                   Field.Index.ANALYZED))
           
           doc.add(Field("user", cjson.encode(tweet['user']), Field.Store.YES,
                         Field.Index.NOT_ANALYZED))
           self.writer.addDocument(doc)
         f.close()
       except Exception, e:
         print "Failed in indexDocs:", e
Example #9
File: tcpclient.py  Project: deavid/irpc
    def prepare(self, id="auto"):
        # Complete all stuff here and set the bytes to send
        if self.prepared:
            return

        if id == "auto":
            id = self.getUnqueuedRandID()
        if id is None:
            idobj = ""
        else:
            idobj = "@" + id
        self.queuedAnswer = self.chatter.language.cmds.answer.queueAnswerFor(id, self.autoremove_id)

        trama_args = ["!%s%s" % (self.command, idobj)]
        for arg in self.local_args:
            tr1 = ":" + arg
            trama_args.append(tr1)

        for k, arg in self.local_kwargs.items():
            tr1 = k + ":" + arg
            trama_args.append(tr1)

        for arg in self.args:
            val = cjson.encode(arg)
            tr1 = "=" + val
            trama_args.append(tr1)

        for k, arg in self.kwargs.items():
            val = cjson.encode(arg)
            tr1 = k + "=" + val
            trama_args.append(tr1)

        self.trama = "\t".join(trama_args) + "\n"

        self.prepared = True
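Decoding such a frame is roughly the reverse (a sketch, assuming argument values contain no stray tab, ':' or '=' characters beyond the markers used above; parse_trama is a hypothetical helper):

import cjson

def parse_trama(trama):
    # Rough inverse of prepare(): split the tab-separated frame back out.
    tokens = trama.rstrip('\n').split('\t')
    command, _, msg_id = tokens[0].lstrip('!').partition('@')
    args, kwargs = [], {}
    for tok in tokens[1:]:
        marker = '=' if '=' in tok else ':'   # '=' marks a JSON-encoded value
        key, _, val = tok.partition(marker)
        if marker == '=':
            val = cjson.decode(val)
        if key:
            kwargs[key] = val
        else:
            args.append(val)
    return command, msg_id or None, args, kwargs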
Example #10
	def newConfiguration(self, formdata):
		cherrypy.response.headers['Content-Type'] = 'application/json'
		
		formdata = cjson.decode(formdata)
		print formdata
		
		error = ""
		
		if self.configurations.has_key(formdata["configname"]) == False:
			if formdata["port"] != "":
				if self.checkFreePorts(formdata["protocol"], formdata["port"]):
					formdata["manport"] = self.getFreePorts(formdata["configname"])[1]
				else:
					result = False
					error = "port is occupied"
					return cjson.encode({"result": result, "error": error})
			else:
				freeport = self.getFreePorts(formdata["configname"])
				formdata["port"] = freeport[0]
				formdata["manport"] = freeport[1]
			
			newConf = Configuration(0, formdata["configname"], "")
			newConf.new(formdata)
			
			self.configurations[newConf.id] = copy(newConf)
			self.setConfiguration(newConf.id)
			result = True
		else:
			result = False
			error = "name exists"
		
		self.loadConfigurations()
		
		return cjson.encode({"result": result, "error": error})
Example #11
def mobile_authenticate(request,**kwargs):
    username = request.REQUEST.get('username', 'empty')
    password = request.REQUEST.get('password','empty')
    user = authenticate(username=username, password=password)
    observer = None
    role = 0
    try:
        observer = Observer.objects.get(user__username=username)
        role = 1 if user.is_superuser else 2
    except:
        pass
    if user is not None and observer is not None:
        return HttpResponse(cjson.encode( {
               'status':'SUCCESS',
               'code':200,
               'message': [
                    {
                        "uuid": observer.uuid,
                        "role": role,
                    }
                ]}))
    else:
        message = unicode('UNAUTHORIZED:Invalid credentials!')
        logging.warn(message)
        logging.debug(u'User ' + username)
        return HttpResponse(cjson.encode({
                'status':'FAIL',
                'code':401, 
                'message': []}))
Example #12
def _RawResponse(clients_param, names_param, blocks, accept):
  """Retrieve raw State results, and add them to the response.

  Args:
    clients_param - Param object containing either client_id or client glob 
                    to retrieve State objects for.
    names_param - Param object containing either List of Taba Names, 
                  or a names_glob to retrieve State objects for.
    blocks - List of Taba Name Block IDs to retrieve State objects for.
    accept - MIME type in which to format the output.
  """

  clients, names = _GetClientsAndNames(clients_param, names_param)

  if (clients != None and len(clients) == 0) \
      or (names != None and len(names) == 0):
    states = []
  else:
    # Get the requested State objects.
    states = global_taba_server.GetStates(clients, names, blocks)

  # Format the response in accordance with the Accept header.
  if accept == 'text/json':
    juno.append(cjson.encode(dict([s for s in states])))

  else:
    for (state_client_id, state_name), state in states:
      juno.append("(").append(state_client_id).append(", ")
      juno.append(state_name).append("): ")
      juno.append(cjson.encode(state)).append("\n")
Example #13
 def _test__http_error(self, json):
     error_data = dict(a=1, b=2)
     code = 415 # just some code...
     headers = FakeHeaders()
     if json:
         headers._headers['content-type'] = 'application/json'
     send_data = dict(c=2, b=3)
     self._expect_json_rest_request('POST', self.uri, send_data).and_raise(
         urllib2.HTTPError(self.uri, code, 'msg', headers, StringIO(cjson.encode(error_data)))
         )
     with self.forge.verified_replay_context():
         with self.assertRaises(JSONRestRequestException) as caught:
             self.sender.post(data=send_data)
     exception = caught.exception
     #make sure the exception is printable
     self.assertIsInstance(str(exception), basestring)
     self.assertIsInstance(repr(exception), basestring)
     self.assertEquals(exception.code, code)
     self.assertIs(exception.sent_data, send_data)
     self.assertEquals(exception.url, self.uri)
     if json:
         self.assertEquals(exception.received_data, error_data)
     else:
         self.assertIsInstance(exception.received_data, Raw)
         self.assertEquals(exception.received_data.data, cjson.encode(error_data))
Example #14
 def fetch_gene_sequences(self):
     print "Fetching family FASTA files..."
     try:
         self.geneSequences = cjson.decode(open('%s/gene_sequences.json'%run_name).read())
     except:
         self.geneSequences = {}
     genes_to_fetch = list(set(self.geneToSpecies.iterkeys())-set(self.geneSequences.iterkeys()))
     if len(genes_to_fetch) == 0:
         print "All genes already fetched."
         return
     self.genes_fetched=0
     self.total_genes_to_fetch = len(genes_to_fetch)
     while len(genes_to_fetch) > 0:
         # Chop off a chunk of 1000 genes, fetch them, write them to the output file
         current_chunk = genes_to_fetch[:1000]
         genes_to_fetch = genes_to_fetch[1000:]
         for g in downloader_pool.imap(fetch_gene, current_chunk):
             self.genes_fetched += 1
             self.total_genes_to_fetch = len(genes_to_fetch) + len(current_chunk)
         fname='%s/gene_sequences.json'%run_name
         if os.path.isfile(fname):
             os.rename(fname,fname+'_old')
             open(fname,'w').write(cjson.encode(self.geneSequences))
             os.unlink(fname+'_old')
         else:
             open(fname,'w').write(cjson.encode(self.geneSequences))
     print "Done."
Example #15
 def GET(self, catrel_name, arch_name, osrel_name, catalogname):
   """Get a srv4 reference by catalog ane catalogname."""
   web.header('Access-Control-Allow-Origin', '*')
   try:
     sqo_osrel, sqo_arch, sqo_catrel = models.GetSqoTriad(
         osrel_name, arch_name, catrel_name)
   except sqlobject.main.SQLObjectNotFound:
     raise web.notfound(
         cjson.encode({'message': 'catalog %s %s %s not found'
                                  % (osrel_name, arch_name, catrel_name)}))
   join = [
       sqlbuilder.INNERJOINOn(None,
         models.Srv4FileInCatalog,
         models.Srv4FileInCatalog.q.srv4file==models.Srv4FileStats.q.id),
   ]
   res = models.Srv4FileStats.select(
       sqlobject.AND(
         models.Srv4FileInCatalog.q.osrel==sqo_osrel,
         models.Srv4FileInCatalog.q.arch==sqo_arch,
         models.Srv4FileInCatalog.q.catrel==sqo_catrel,
         models.Srv4FileStats.q.catalogname==catalogname,
         models.Srv4FileStats.q.use_to_generate_catalogs==True),
       join=join,
   )
   try:
     srv4 = res.getOne()
     mimetype, data = srv4.GetRestRepr(quick=True)
     web.header('Content-type', mimetype)
     return cjson.encode(data)
   except sqlobject.main.SQLObjectNotFound:
     data = {'message': 'no %s %s %s %s packages found'
             % (catrel_name, arch_name, osrel_name, catalogname)}
     raise web.notfound(cjson.encode(data))
   except sqlobject.dberrors.OperationalError as exc:
     raise web.internalerror(exc)
Example #16
def get_media(mediaid,field_list = []):
    if type(mediaid) == list:
        return_list = []
        no_cache_list = []
        temp_map = {}
        for i in mediaid:
            media = cache.get(MEDIA_HEAD+str(i).strip())
            if media:
                temp_map[str(i).strip()] = get_field_data_form_json(media,field_list)
            else:
                no_cache_list.append(str(i).strip())
        media_list = MediaDetailInfo.objects.filter(mediaid__in=no_cache_list)
        for i in media_list:
            temp_map[i.mediaid] = get_field_data_form_obj(i)
            cache.set(MEDIA_HEAD+str(i.mediaid),cjson.encode(get_field_data_form_obj(i)),MEDIA_CACHE_TIME)
        for i in mediaid:
            if temp_map.has_key(str(i).strip()):
                return_list.append(temp_map[str(i).strip()])
        return return_list
    else:
        media = cache.get(MEDIA_HEAD+str(mediaid))
        if media:
            return get_field_data_form_json(media,field_list)
        else:
            media_obj = MediaDetailInfo.objects.filter(mediaid=mediaid)
            if len(media_obj) > 0:
                cache.set(MEDIA_HEAD+str(mediaid),cjson.encode(get_field_data_form_obj(media_obj[0])),MEDIA_CACHE_TIME)
                return get_field_data_form_obj(media_obj[0],field_list)
            else:
                media_obj = get_detail_from_spider(mediaid)
                return get_field_data_form_obj(media_obj,field_list)
Example #17
File: pkgdb_web.py  Project: dago/opencsw
 def GET(self, catrel_name, arch_name, osrel_name, pkgname):
   """Get a srv4 reference by catalog ane pkgname."""
   configuration.SetUpSqlobjectConnection()
   sqo_osrel, sqo_arch, sqo_catrel = pkgdb.GetSqoTriad(
       osrel_name, arch_name, catrel_name)
   join = [
       sqlbuilder.INNERJOINOn(None,
         models.Srv4FileInCatalog,
         models.Srv4FileInCatalog.q.srv4file==models.Srv4FileStats.q.id),
       sqlbuilder.INNERJOINOn(None,
         models.Pkginst,
         models.Pkginst.q.id==models.Srv4FileStats.q.pkginst),
   ]
   res = models.Srv4FileStats.select(
       sqlobject.AND(
         models.Srv4FileInCatalog.q.osrel==sqo_osrel,
         models.Srv4FileInCatalog.q.arch==sqo_arch,
         models.Srv4FileInCatalog.q.catrel==sqo_catrel,
         models.Pkginst.q.pkgname==pkgname,
         models.Srv4FileStats.q.use_to_generate_catalogs==True),
       join=join,
   )
   try:
     srv4 = res.getOne()
     mimetype, data = srv4.GetRestRepr()
     web.header('Content-type', mimetype)
     web.header('Access-Control-Allow-Origin', '*')
     return cjson.encode(data)
   except sqlobject.main.SQLObjectNotFound:
     return cjson.encode(None)
   except sqlobject.dberrors.OperationalError, e:
     raise web.internalerror(e)
Example #18
def _ProjectionResponse(clients_param, names_param, blocks, accept):
  """Retrieve Projection results, and add them to the response.

  Args:
    clients_param - Param object containing either client_id or client glob 
                    to retrieve Projections for.
    names_param - Param object containing either List of Taba Names, 
                  or a names_glob to retrieve Projections for.
    blocks - List of Taba Name Block IDs to retrieve Projections for.
    accept - MIME type in which to format the output.
  """
  clients, names = _GetClientsAndNames(clients_param, names_param)

  if (clients != None and len(clients) == 0) \
      or (names != None and len(names) == 0):
    projections = []

  else:
    # Retrieve the requested Projections.
    projections = global_taba_server.GetProjections(clients, names, blocks)

  # Render the Projection objects according to the requested format.
  if accept == 'text/json':
    juno.append(cjson.encode(dict([p for p in projections])))

  else:
    for (proj_client_id, proj_name), projection in projections:
      juno.append("(").append(proj_client_id).append(", ")
      juno.append(proj_name).append("): ")
      juno.append(cjson.encode(projection)).append("\n")
Example #19
 def downloadImages():
     services = {'twitpic': HTMLParsers.parseTwitpic, 'yfrog': HTMLParsers.parseYfrog, 'twitrpix': HTMLParsers.parseTwitrpix}
     for f in ['2011_2_%s'%i for i in range(20, 26)]:
         for tweet in Utilities.iterateTweetsFromFile(Settings.new_zealand_pics_folder+'tweets/'+f):
             d = datetime.strptime(tweet['created_at'], Settings.twitter_api_time_format)
             print d
             print cjson.encode(tweet)
             
             service, url = 'twitpic', ''
             if len(tweet['entities']['urls'])>0: url = tweet['entities']['urls'][0]['url']
             else:
                 for service, parseMethod in services.iteritems():
                     if service in tweet['text']:
                         for term in tweet['text'].split():
                             if service in term: 
                                 url = term
                                 break
             url = url.replace('\\', '')
             if not url.startswith('http'): url = 'http://'+url
             for service, parseMethod in services.iteritems():
                 if service in url:
                     id = tweet['id']
                     fileName = Settings.new_zealand_pics_folder+Utilities.getDataFile(d)+'/%s_%s.jpeg'%(str(d).replace(' ', '_'), id)
                     print url, fileName
                     folder = '/'.join(fileName.split('/')[:-1])
                     if not os.path.exists(folder): os.makedirs(folder, 0777)
                     if not os.path.exists(fileName):
                         retry, notParsed = 0, True
                         while retry<3 and notParsed:
                             try:
                                 parseMethod(url, fileName)
                                 notParsed = False
                                 time.sleep(3)
                             except: retry+=1
Example #20
def home(request):
    """Top level url
    
    Displays ::
        {"path": HttpRequest.path, 
         "host": HttpRequest.get_host(), 
         "version": sana.api.version, 
         "service": "REST"}
    """
    username = request.REQUEST.get('username', 'empty')
    password = request.REQUEST.get('password','empty')
    user = authenticate(username=username, password=password)
    if user is not None:
        return HttpResponse(cjson.encode( {
               'status':'SUCCESS',
               'code':200,
               'message': version()}))
    else:
        message = unicode('UNAUTHORIZED:Invalid credentials!')
        logging.warn(message)
        logging.debug(u'User ' + username)
        return HttpResponse(cjson.encode({
                'status':'FAIL',
                'code':401, 
                'message': message}))
Example #21
 def mqCallback(self, channel, method_frame, header_frame, body):
     try:
         if not self.zk.is_proxy_master():
             return
         # master's business
         data_dict = cjson.decode(body)
         # ** MUST ** ack
         channel.basic_ack(method_frame.delivery_tag)
         utils.log(utils.cur(), body, data_dict)
         if not isinstance(data_dict, dict):
             return
         for db, forbid in data_dict.iteritems():
             if not forbid[Forbid.KEY_TYPE] in (Forbid.FORBID_WORKING, Forbid.FORBID_FOREVER):
                 return
             forbid[Forbid.KEY_START] = time.time()
             path = os.path.join(ZKConf.ZK_PATH_FORBID, db)
             orig = self.get_path(path)
             if orig is False:
                 self.zk.mknode(path, cjson.encode(forbid))
             else:
                 old = cjson.decode(orig)
                 if (
                     old[Forbid.KEY_TYPE] == forbid[Forbid.KEY_TYPE]
                     and old[Forbid.KEY_TYPE] == Forbid.FORBID_WORKING
                     and old[Forbid.KEY_START] + old[Forbid.KEY_DURATION] > time.time()
                 ):
                     utils.log(utils.cur(), "still forbidding")
                 else:
                     utils.log(utils.cur(), "change forbid")
                     # change /database/forbid/db
                     self.forbidinfo[db] = forbid
                     self.zk.set(path, cjson.encode(forbid))
     except Exception, err:
         utils.err(utils.cur(), err)
Example #22
def thrash_cjson():
    for obj, tns, json, msh in TESTS:
        if TEST_DUMP_ONLY:
            cjson.encode(obj)
        elif TEST_LOAD_ONLY:
            assert cjson.decode(json) == obj
        else:
            assert cjson.decode(cjson.encode(obj)) == obj
Example #23
def action_export(conn, tables, excluded_urls):
    items = []
    for table in tables:
        items += get_saved_sessions(conn, table, False)

    items = remove_duplicates(filter_excluded(items))

    print cjson.encode(items)
Example #24
File: common.py  Project: brownplt/Resume
 def toJSON(self, fields=None):
     if fields == None:
         return cjson.encode(self)
     else:
         dct = {}
         for field in fields:
             dct[field] = self.__getattribute__(field)
         return cjson.encode(dct)
Example #25
 def PUT(self, name):
   if not re.match(r'', name):
     raise web.conflict()
   with Transaction(models.Srv4FileStats) as trans:
     res = models.CatalogRelease.selectBy(name=name, connection=trans)
     if res.count():
       return cjson.encode('%s already exists' % name)
     models.CatalogRelease(name=name, connection=trans)
   return cjson.encode('%s has been created' % name)
Example #26
File: core.py  Project: nobus/multirog
 def on_message(self, message):
     try:
         data = cjson.decode(message)
         resp = game.notify(self, data)
         self.write_message(cjson.encode(resp))
     except Exception, e:
         print e
         req = cjson.encode({"error": "undefined"})
         self.write_message(req)
Example #27
def deleteplayhistory(request):
    res_json = {}
    res_json[RES_STATUS_CODE] = STATUS_SUCCESS
    deviceid = request.GET.get("deviceid","")
    if deviceid:
        History.objects.filter(deviceid=deviceid).delete()
        return HttpResponse(cjson.encode(res_json))
    res_json[RES_STATUS_CODE] = STATUS_ARGUMENT_ERROR
    return HttpResponse(cjson.encode(res_json))
Example #28
def add_test(v):
    #  These modules have a few round-tripping problems...
    try:
        assert cjson.decode(cjson.encode(v)) == v
        assert yajl.loads(yajl.dumps(v)) == v
    except Exception:
        pass
    else:
        TESTS.append((v,tnetstring.dumps(v),cjson.encode(v)))
Example #29
 def searchKeywords():
     keywords = sys.argv[1:]
     japan_bb=[30, 42, 129, 145]
     for f in ['2011_3_10.gz', '2011_3_11.gz']:
         for tweet in Utilities.iterateTweetsFromGzip(Settings.geo_folder+f):
             if Utilities.tweetInBoundingBox(tweet, japan_bb):
                 for k in keywords:
                     if k in tweet['text']: 
                         print cjson.encode(tweet)
Example #30
		def output_transformation(line):
			if not line:
				return ''
			if 'seq' in line:
				line['seq'] = base64.urlsafe_b64encode(
					zlib.compress(cjson.encode(line['seq']), 1))
			elif 'last_seq' in line:
				line['last_seq'] = base64.urlsafe_b64encode(
					zlib.compress(cjson.encode(line['last_seq']), 1))
			return cjson.encode(line) + '\n'
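Reading such a line back means undoing both layers (a sketch derived only from the encoding above; input_transformation is a hypothetical name):

import base64
import zlib
import cjson

def input_transformation(raw_line):
    # Inverse of output_transformation: decode the line, then unpack the
    # compressed, base64-encoded 'seq'/'last_seq' payloads.
    line = cjson.decode(raw_line)
    for key in ('seq', 'last_seq'):
        if key in line:
            line[key] = cjson.decode(
                zlib.decompress(base64.urlsafe_b64decode(str(line[key]))))
    return line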
Example #31
 def jdumps(obj, encoding='utf-8'):
     """
     Serializes ``obj`` to a JSON formatted string, using cjson.
     """
     return cjson.encode(obj)
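cjson is a Python 2 C extension; where it is not installed, a drop-in sketch backed by the standard library behaves the same for typical payloads (only slower):

try:
    import cjson
    def jdumps(obj, encoding='utf-8'):
        return cjson.encode(obj)
except ImportError:
    import json
    def jdumps(obj, encoding='utf-8'):
        return json.dumps(obj)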
Example #32
        new_creator_addr = {}
        new_creator_addr['c_code'] = creator_addr['country_code']
        if 'state' in creator_addr.keys():
            new_creator_addr['s'] = creator_addr['state']
        if 'county' in creator_addr.keys():
            new_creator_addr['c'] = creator_addr['county']
        line['c_addr'] = new_creator_addr

        user_url = web + user_lat + user_lon  #+extra
        user_info = urllib2.urlopen(user_url)
        user_info = user_info.read()
        user_addr = cjson.decode(user_info)['address']
        #            print user_addr
        new_user_addr = {}
        new_user_addr['c_code'] = user_addr['country_code']
        if 'state' in user_addr.keys():
            new_user_addr['s'] = user_addr['state']
        if 'county' in user_addr.keys():
            new_user_addr['c'] = user_addr['county']
        line['u_addr'] = new_user_addr
        line = cjson.encode(line)
        outfile.write(line + '\n')
    except:
        print "error reading web"
        #pass
        #sys.exit()
##else:
#        pass
#from httplib2 import Http
#h=Http()
#resp,content=h.request(std,'GET')
#content=cjson.decode(content)
#print content['county']
Example #33
    #elif freq1 ==freq2 and common>=3:
    #    if ' ' in key1:
    #        removelist.append(key2)
    #    elif ' ' in key2:
    #        removelist.append(key1)
#print removelist
#print'_______1_______'
rulelist = []
outfile2 = sys.argv[3]  #'/spare/wei/local/rulelist_en_ch3-log'

out2 = open(outfile2, 'w')
for key, value in relation.iteritems():
    if key in removelist:
        print key
        continue
    for key1, value1 in value.iteritems():
        if key1 in removelist:
            continue
        if value1[0] > 0.1 and key != key1:
            if (key in key1 or key1 in key) and value1[0] == 1:
                continue
            else:
                rulelist.append([key1, key, value1[0]])
rules = sorted(rulelist, key=itemgetter(2), reverse=True)
for rule in rules:
    out2.write(cjson.encode(rule) + '\n')
#print dist1
#print dist2
removelistf = open(sys.argv[4], 'w')
removelistf.write(cjson.encode(removelist))
Example #34
import cjson
sift = 'tag_user_dict_sift_1'
f = open(sift, 'w')
i = 0

for line in open('tag_user_dict_sift', 'r'):
    #    if i<22414:
    #        i+=1
    #        continue
    #line=cjson.decode(line)
    #line=line.split('\t')[1]
    line = cjson.decode(line)
    if line[0][-2:] != 'of' and line[0][-3:] != 'and':
        f.write(cjson.encode(line) + '\n')
    else:
        pass
    #print line
    #line=cjson.decode(line)
    #f.write(line+'\n')
Example #35
#remove tag that are not in the tag_user_dict
import sys
import cjson
argv = sys.argv
#inputfilename unique_tag outputfilename
infile1 = open(argv[1], 'r')
infile2 = open(argv[2], 'r')
tag_f = cjson.decode(infile2.readline())
tag_f = tag_f.keys()
outfile = open(argv[3], 'w')
distinct = open(sys.argv[4], 'w')
distinctdict = {}
distinctlc = {}
for line in infile1:
    line = line.split('\t')[1]
    line = cjson.decode(line)
    dictt = {}
    for key, value in line[1].iteritems():
        if key in tag_f and value > 0.01:
            dictt[key] = round(value, 2)
            distinctdict[key] = distinctdict.get(key, 0)
            if value > 1:
                distinctlc[key] = distinctlc.get(key, 0)
    if dictt != {}:
        outfile.write(cjson.encode([line[0], dictt]) + '\n')
distinct.write(cjson.encode(distinctdict) + '\n')
distinct.write(cjson.encode(distinctlc) + '\n')
Example #36
    max_can_value = 0  # assumed initializer; this excerpt begins mid-loop
    max_can = ''
    for node in G.nodes():
        key1, key2 = tag + '_' + node, node + '_' + tag
        b = cosine.get(key1, 0)
        c = cosine.get(key2, 0)
        similarity = max(b, c)
        if similarity > max_can_value:
            max_can_value = similarity
            max_can = node
    if max_can_value > THRESHOLD:
        G.add_node(tag)
        G.add_edge(max_can, tag)
        #print 'new tag',tag
        #print 'root',max_can
        #print graph_dict
        #graph_dict[str(max_can)][tag]={}
        graph_parent_dict[tag] = max_can
    else:
        G.add_node(tag)
        graph_parent_dict[tag] = 'ROOT'

nx.write_gml(G, "noise_0.1_folksonomy_0.3.gml")
print graph_parent_dict
a = build_tree(graph_parent_dict)
print a
outfile2 = open('noise_0.1_folksonomy_0.3', 'w')
print_dict(a, outfile2)
outfile3 = 'noise_0.1_graph_parent_dict_0.3'
outfile3 = open(outfile3, 'w')
outfile3.write(cjson.encode(graph_parent_dict))
Example #37
 def dataSent(self, data):
     #print "Server sent : " + str(data)
     self.transport.write(cjson.encode(data) + '\n')
Example #38
    def generate(numberOfDocuments=2500, dimensions=52):
        def pickOneByProbability(objects, probabilities):
            initialValue, objectToRange = 0.0, {}
            for i in range(len(objects)):
                objectToRange[objects[i]] = (initialValue,
                                             initialValue + probabilities[i])
                initialValue += probabilities[i]
            randomNumber = random.random()
            for object, rangeVal in objectToRange.iteritems():
                if rangeVal[0] <= randomNumber <= rangeVal[1]: return object

        topics = {
            'elections': {
                'prob': 0.3,
                'tags': {
                    '#gop': 0.4,
                    '#bachmann': 0.2,
                    '#perry': 0.2,
                    '#romney': 0.2
                }
            },
            'soccer': {
                'prob': 0.2,
                'tags': {
                    '#rooney': 0.15,
                    '#chica': 0.1,
                    '#manutd': 0.6,
                    '#fergie': 0.15
                }
            },
            'arab': {
                'prob': 0.3,
                'tags': {
                    '#libya': 0.4,
                    '#arab': 0.3,
                    '#eqypt': 0.15,
                    '#syria': 0.15
                }
            },
            'page3': {
                'prob': 0.2,
                'tags': {
                    '#paris': 0.2,
                    '#kim': 0.4,
                    '#britney': 0.2,
                    '#khloe': 0.2
                }
            },
        }
        stopwords = 'abcdefghijklmnopqrstuvwxyz1234567890'

        print '#', cjson.encode({'dimensions': dimensions})
        for i in range(numberOfDocuments):
            topic = pickOneByProbability(
                topics.keys(), [topics[k]['prob'] for k in topics.keys()])
            print ' '.join([topic] + [
                pickOneByProbability(topics[topic]['tags'].keys(), [
                    topics[topic]['tags'][k]
                    for k in topics[topic]['tags'].keys()
                ]) for i in range(2)
            ] + [random.choice(stopwords) for i in range(5)])
Example #39
def serialize(feature_gene, hits):
    doc = {}
    doc['y'], doc['x'], doc['gene'], doc['barcode'] = feature_gene
    doc['hits'] = hits
    return cjson.encode(doc)
Example #40
def encode_json(obj):
    """Wrapper to re-encode JSON string in an implementation-independent way."""
    # TODO: Verify correctness of cjson
    return cjson.encode(obj)
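A matching decode wrapper follows the same pattern (a sketch; decode_json is a hypothetical counterpart name):

def decode_json(s):
    """Wrapper to decode a JSON string in an implementation-independent way."""
    return cjson.decode(s)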
Example #41
    def main(self):
        twitterurl = "http://api.twitter.com/1/users/search.json"

        if self.proxy:
            proxyhandler = urllib2.ProxyHandler({"http": self.proxy})
            twitopener = urllib2.build_opener(proxyhandler)
            urllib2.install_opener(twitopener)

        headers = {'User-Agent': "BBC R&D Grabber"}
        postdata = None

        if self.keypair == False:
            # Perform OAuth authentication - as we don't have the secret key pair we need to request it
            # This will require some user input
            request_token_url = 'http://api.twitter.com/oauth/request_token'
            access_token_url = 'http://api.twitter.com/oauth/access_token'
            authorize_url = 'http://api.twitter.com/oauth/authorize'

            token = None
            consumer = oauth.Consumer(key=self.consumerkeypair[0],
                                      secret=self.consumerkeypair[1])

            params = {
                'oauth_version': "1.0",
                'oauth_nonce': oauth.generate_nonce(),
                'oauth_timestamp': int(time.time()),
            }

            params['oauth_consumer_key'] = consumer.key

            req = oauth.Request(method="GET",
                                url=request_token_url,
                                parameters=params)

            signature_method = oauth.SignatureMethod_HMAC_SHA1()
            req.sign_request(signature_method, consumer, token)

            requestheaders = req.to_header()
            requestheaders['User-Agent'] = "BBC R&D Grabber"

            # Connect to Twitter
            try:
                req = urllib2.Request(
                    request_token_url, None, requestheaders
                )  # Why won't this work?!? Is it trying to POST?
                conn1 = urllib2.urlopen(req)
            except httplib.BadStatusLine:
                e = sys.exc_info()[1]
                Print("PeopleSearch BadStatusLine error:", e)
                conn1 = False
            except urllib2.HTTPError:
                e = sys.exc_info()[1]
                Print("PeopleSearch HTTP error:", e.code)
                #                sys.stderr.write('PeopleSearch HTTP error: ' + str(e.code) + '\n')
                conn1 = False
            except urllib2.URLError:
                e = sys.exc_info()[1]
                Print("PeopleSearch URL error: ", e.reason)
                #                sys.stderr.write('PeopleSearch URL error: ' + str(e.reason) + '\n')
                conn1 = False

            if conn1:
                content = conn1.read()
                conn1.close()

                request_token = dict(urlparse.parse_qsl(content))

                Print("Request Token:")
                Print("     - oauth_token        = ",
                      request_token['oauth_token'])
                Print("     - oauth_token_secret = ",
                      request_token['oauth_token_secret'])
                Print("")

                # The user must confirm authorisation so a URL is Printed here
                Print("Go to the following link in your browser:")
                Print("%s?oauth_token=%s" %
                      (authorize_url, request_token['oauth_token']))
                Print("")

                accepted = 'n'
                # Wait until the user has confirmed authorisation
                while accepted.lower() == 'n':
                    accepted = raw_input('Have you authorized me? (y/n) ')
                oauth_verifier = raw_input('What is the PIN? ')

                token = oauth.Token(request_token['oauth_token'],
                                    request_token['oauth_token_secret'])
                token.set_verifier(oauth_verifier)

                params = {
                    'oauth_version': "1.0",
                    'oauth_nonce': oauth.generate_nonce(),
                    'oauth_timestamp': int(time.time()),
                }

                params['oauth_token'] = token.key
                params['oauth_consumer_key'] = consumer.key

                req = oauth.Request(method="GET",
                                    url=access_token_url,
                                    parameters=params)

                signature_method = oauth.SignatureMethod_HMAC_SHA1()
                req.sign_request(signature_method, consumer, token)

                requestheaders = req.to_header()
                requestheaders['User-Agent'] = "BBC R&D Grabber"
                # Connect to Twitter
                try:
                    req = urllib2.Request(
                        access_token_url, "oauth_verifier=%s" % oauth_verifier,
                        requestheaders
                    )  # Why won't this work?!? Is it trying to POST?
                    conn1 = urllib2.urlopen(req)
                except httplib.BadStatusLine:
                    e = sys.exc_info()[1]
                    #                    sys.stderr.write('PeopleSearch BadStatusLine error: ' + str(e) + '\n')
                    Print('PeopleSearch BadStatusLine error: ', e)
                    conn1 = False
                except urllib2.HTTPError:
                    e = sys.exc_info()[1]
                    Print('PeopleSearch HTTP error: ', e.code)
                    conn1 = False
                except urllib2.URLError:
                    e = sys.exc_info()[1]
                    #                    sys.stderr.write('PeopleSearch URL error: ' + str(e.reason) + '\n')
                    Print('PeopleSearch URL error: ', e.reason)
                    conn1 = False

                if conn1:
                    content = conn1.read()
                    conn1.close()
                    access_token = dict(urlparse.parse_qsl(content))

                    # Access tokens retrieved from Twitter
                    Print("Access Token:")
                    Print("     - oauth_token        = ",
                          access_token['oauth_token'])
                    Print("     - oauth_token_secret = ",
                          access_token['oauth_token_secret'])
                    Print("")
                    Print(
                        "You may now access protected resources using the access tokens above."
                    )
                    Print("")

                    save = False
                    # Load config to save OAuth keys
                    try:
                        homedir = os.path.expanduser("~")
                        file = open(homedir + "/twitter-login.conf", 'r')
                        save = True
                    except IOError:
                        e = sys.exc_info()[1]
                        Print(
                            "Failed to load config file - not saving oauth keys: ",
                            e)

                    if save:
                        raw_config = file.read()

                        file.close()

                        # Read config and add new values
                        config = cjson.decode(raw_config)
                        config['key'] = access_token['oauth_token']

                        config['secret'] = access_token['oauth_token_secret']

                        raw_config = cjson.encode(config)

                        # Write out the new config file
                        try:
                            file = open(homedir + "/twitter-login.conf", 'w')
                            file.write(raw_config)
                            file.close()
                        except IOError:
                            e = sys.exc_info()[1]
                            Print("Failed to save oauth keys: ", e)

                    self.keypair = [
                        access_token['oauth_token'],
                        access_token['oauth_token_secret']
                    ]

        while not self.finished():
            # TODO: Implement backoff algorithm in case of connection failures - watch out for the fact this could delay the requester component
            if self.dataReady("inbox"):
                # Retrieve keywords to look up
                person = self.recv("inbox")

                # Ensure we're not rate limited during the first request - if so we'll wait for 15 mins before our next request
                if (datetime.today() -
                        timedelta(minutes=15)) > self.ratelimited:
                    requesturl = twitterurl + "?q=" + urllib.quote(
                        person) + "&per_page=5"

                    params = {
                        'oauth_version': "1.0",
                        'oauth_nonce': oauth.generate_nonce(),
                        'oauth_timestamp': int(time.time()),
                    }

                    token = oauth.Token(key=self.keypair[0],
                                        secret=self.keypair[1])
                    consumer = oauth.Consumer(key=self.consumerkeypair[0],
                                              secret=self.consumerkeypair[1])

                    params['oauth_token'] = token.key
                    params['oauth_consumer_key'] = consumer.key

                    req = oauth.Request(method="GET",
                                        url=requesturl,
                                        parameters=params)

                    signature_method = oauth.SignatureMethod_HMAC_SHA1()
                    req.sign_request(signature_method, consumer, token)

                    requestheaders = req.to_header()
                    requestheaders['User-Agent'] = "BBC R&D Grabber"

                    # Connect to Twitter
                    try:
                        req = urllib2.Request(
                            requesturl, None, requestheaders
                        )  # Why won't this work?!? Is it trying to POST?
                        conn1 = urllib2.urlopen(req)
                    except httplib.BadStatusLine:
                        e = sys.exc_info()[1]
                        #                        sys.stderr.write('PeopleSearch BadStatusLine error: ' + str(e) + '\n')
                        Print('PeopleSearch BadStatusLine error: ', e)
                        conn1 = False
                    except urllib2.HTTPError:
                        e = sys.exc_info()[1]
                        #                        sys.stderr.write('PeopleSearch HTTP error: ' + str(e.code) + '\n')
                        Print('PeopleSearch HTTP error: ', e.code)
                        conn1 = False
                    except urllib2.URLError:
                        e = sys.exc_info()[1]
                        #                        sys.stderr.write('PeopleSearch URL error: ' + str(e.reason) + '\n')
                        Print('PeopleSearch URL error: ', e.reason)
                        conn1 = False

                    if conn1:
                        # Check rate limiting here and Print current limit
                        headers = conn1.info()
                        try:
                            headerlist = string.split(str(headers), "\n")
                        except UnicodeEncodeError:  # str may fail...
                            headerlist = []
                        for line in headerlist:
                            if line != "":
                                splitheader = line.split()
                                if splitheader[
                                        0] == "X-FeatureRateLimit-Remaining:" or splitheader[
                                            0] == "X-RateLimit-Remaining:":
                                    Print(splitheader[0], " ", splitheader[1])
                                    if int(splitheader[1]) < 5:
                                        self.ratelimited = datetime.today()
                        # Grab json format result of people search here
                        try:
                            data = conn1.read()
                            try:
                                content = cjson.decode(data)
                                self.send(content, "outbox")
                            except cjson.DecodeError:
                                self.send(dict(), "outbox")
                        except IOError:
                            e = sys.exc_info()[1]
                            #                            sys.stderr.write('PeopleSearch IO error: ' + str(e) + '\n')
                            Print('PeopleSearch IO error: ', e)
                            self.send(dict(), "outbox")
                        conn1.close()
                    else:
                        self.send(dict(), "outbox")
                else:
                    Print("Twitter search paused - rate limited")
                    self.send(dict(), "outbox")
            self.pause()
            yield 1
Example #42
def error(exception):
    #errors.append(exception)
    #errors = exception if exception else
    errors = traceback.format_exception_only(*sys.exc_info()[:2])
    #.splitlines()[1:]#[]
    #et, val, tb = sys.exc_info()
    #tb = sys.exc_info()[2]
    #trace = traceback.format_tb(tb)
    #for tbm in traceback.format_tb(tb):
    #for trace in traceback.format_exception_only(sys.last_type, sys.last_value):
    #    errors.append(repr(trace))

    response = {
        'status': 'FAILURE',
        'code': 500,
        'message': None,
        'errors': errors,
    }
    return response


def unauthorized(message):
    return fail(message, Codes.UNAUTHORIZED)


if __name__ == '__main__':
    import cjson
    data = cjson.encode({'a': '1'})
    js = JSONResponse(data)
    print js._headers
Example #43
    key1,key2=key[0],key[1]
    #freq1=tagdict[key1+'_'+key1]
    #freq2=tagdict[key2+'_'+key2]
    freq1=tag_freq_dict[key1]
    freq2=tag_freq_dict[key2]
    common=tagdict.get(key1+'_'+key2,tagdict.get(key2+'_'+key1))
    if common>=3 and freq1>=freq2:
        weight=(1-(log(freq1)-log(freq2))/lMAXF)
        #print weight
        #weight=(1-(freq1-freq2)/MAXF)

        relation[key1][key2]=common/freq2*weight
    elif common>=3 and freq2>=freq1:
        weight=(1-(log(freq2)-log(freq1))/lMAXF)
        #weight=(1-(freq2-freq1)/MAXF)
        #print weight
        relation[key2][key1]=common/freq1*weight

print '_______1_______'
rulelist=[]
outfile2='/spare/wei/local/rulelist_en_ch3-log'

out2=open(outfile2,'w')
for key,value in relation.iteritems():
    for key1,value1 in value.iteritems():
        if value1>0.0000001 and value1!=1 and key!=key1:
            rulelist.append([key1,key,value1])
rules=sorted(rulelist,key=itemgetter(2),reverse=True)
for rule in rules:
    out2.write(cjson.encode(rule)+'\n')
Example #44
File: nmsg.py  Project: rainfly123/fuwav2
    def get(self):
        userid = self.get_argument("userid", strip=True)
        amount = self.get_argument("amount", strip=True)

        data = mysql.YuheSub(userid, amount)
        self.write(cjson.encode(data))
Example #45
#generating parent child dictionary like{food:{parent:media,child:[......]}} from
#graph_parent_dict

import cjson
local = '/spare/wei/local/%s'
infile = local % 'graph_parent_dict_chi'
infile = open(infile, 'r')
graph_parent_dict = cjson.decode(infile.readline())
p_c_dict = {}
for key, value in graph_parent_dict.iteritems():
    if key not in p_c_dict.keys():
        p_c_dict[key] = {'parent': value, 'child': []}
    else:
        p_c_dict[key]['parent'] = value
    if value not in p_c_dict.keys():
        p_c_dict[value] = {'parent': 'a', 'child': [key]}
    else:
        p_c_dict[value]['child'].append(key)
outfile = open(local % 'p_c_dict_chi', 'w')
outfile.write(cjson.encode(p_c_dict))
Example #46
    def main(self):
        cursor = self.dbConnect()
        while not self.finished():
            twitdata = list()
            # Collect all current received tweet JSON and their related PIDs into a twitdata list
            while self.dataReady("inbox"):
                pids = list()
                data = self.recv("inbox")
                for pid in data[1]:
                    pids.append(pid)
                twitdata.append([data[0], pids])
            if len(twitdata) > 0:
                # Process the received twitdata
                for tweet in twitdata:
                    tweet[0] = tweet[0].replace(
                        "\\/", "/"
                    )  # Fix slashes in links: This may need moving further down the line - ideally it would be handled by cjson
                    if tweet[0] != "\r\n":  # If \r\n is received, this is just a keep alive signal from Twitter every 30 secs
                        # At this point, each 'tweet' contains tweetdata, and a list of possible pids
                        newdata = cjson.decode(tweet[0])
                        if newdata.has_key('delete') or newdata.has_key(
                                'scrub_geo') or newdata.has_key('limit'):
                            # Keep a record of all requests from Twitter for deletions, location removal etc
                            # As yet none of these have been received, but this code will store them if they are received to enable debugging
                            filepath = "contentDebug.txt"
                            if os.path.exists(filepath):
                                file = open(filepath, 'r')
                                filecontents = file.read()
                            else:
                                filecontents = ""
                            file = open(filepath, 'w')
                            file.write(filecontents + "\n" +
                                       str(datetime.utcnow()) + " " +
                                       cjson.encode(newdata))
                            file.close()
                        else:
                            # This is a real tweet
                            tweetid = newdata['id']
                            print "New tweet! @" + newdata['user'][
                                'screen_name'] + ": " + newdata['text']
                            for pid in tweet[1]:
                                # Cycle through possible pids, grabbing that pid's keywords from the DB
                                # Then, check this tweet against the keywords and save to DB where appropriate (there may be more than one location)
                                cursor.execute(
                                    """SELECT keyword,type FROM keywords WHERE pid = %s""",
                                    (pid))
                                data = cursor.fetchall()
                                for row in data:
                                    # Some keywords are stored with a ^. These must be split, and the tweet checked to see if it has both keywords, but not necessarily next to each other
                                    keywords = row[0].split("^")
                                    if len(keywords) == 2:
                                        if string.lower(
                                                keywords[0]) in string.lower(
                                                    newdata['text']
                                                ) and string.lower(
                                                    keywords[1]
                                                ) in string.lower(
                                                    newdata['text']):
                                            cursor.execute(
                                                """SELECT timestamp,timediff FROM programmes WHERE pid = %s ORDER BY timestamp DESC""",
                                                (pid))
                                            progdata = cursor.fetchone()
                                            if progdata != None:
                                                # Ensure the user hasn't already tweeted the same text
                                                # Also ensure they haven't tweeted in the past 10 seconds
                                                timestamp = time2.mktime(
                                                    parse(newdata['created_at']
                                                          ).timetuple())
                                                cursor.execute(
                                                    """SELECT * FROM rawdata WHERE (pid = %s AND text = %s AND user = %s) OR (pid = %s AND user = %s AND timestamp >= %s AND timestamp < %s)""",
                                                    (pid, newdata['text'],
                                                     newdata['user']
                                                     ['screen_name'], pid,
                                                     newdata['user']
                                                     ['screen_name'],
                                                     timestamp - 10,
                                                     timestamp + 10))
                                                if cursor.fetchone() is None:
                                                    print(
                                                        "Storing tweet for pid "
                                                        + pid)
                                                    # Work out where this tweet really occurred in the programme using timestamps and DVB bridge data
                                                    progposition = timestamp - (
                                                        progdata[0] -
                                                        progdata[1])
                                                    cursor.execute(
                                                        """INSERT INTO rawdata (tweet_id,pid,timestamp,text,user,programme_position) VALUES (%s,%s,%s,%s,%s,%s)""",
                                                        (tweetid, pid,
                                                         timestamp,
                                                         newdata['text'],
                                                         newdata['user']
                                                         ['screen_name'],
                                                         progposition))
                                                    break  # Break out of this loop and back to check the same tweet against the next programme
                                                else:
                                                    print(
                                                        "Duplicate tweet from user - ignoring"
                                                    )
                                    if row[0].lower() in newdata['text'].lower():
                                        cursor.execute(
                                            """SELECT timestamp,timediff FROM programmes WHERE pid = %s ORDER BY timestamp DESC""",
                                            (pid,))
                                        progdata = cursor.fetchone()
                                        if progdata is not None:
                                            # Ensure the user hasn't already tweeted the same text for this programme
                                            # Also ensure they haven't tweeted in the past 10 seconds
                                            timestamp = time2.mktime(
                                                parse(newdata['created_at']).
                                                timetuple())
                                            cursor.execute(
                                                """SELECT * FROM rawdata WHERE (pid = %s AND text = %s AND user = %s) OR (pid = %s AND user = %s AND timestamp >= %s AND timestamp < %s)""",
                                                (pid, newdata['text'],
                                                 newdata['user']
                                                 ['screen_name'], pid,
                                                 newdata['user']
                                                 ['screen_name'], timestamp -
                                                 10, timestamp + 10))
                                            if cursor.fetchone() is None:
                                                print(
                                                    "Storing tweet for pid " +
                                                    pid)
                                                # Work out where this tweet really occurred in the programme using timestamps and DVB bridge data
                                                progposition = timestamp - (
                                                    progdata[0] - progdata[1])
                                                cursor.execute(
                                                    """INSERT INTO rawdata (tweet_id,pid,timestamp,text,user,programme_position) VALUES (%s,%s,%s,%s,%s,%s)""",
                                                    (tweetid, pid, timestamp,
                                                     newdata['text'],
                                                     newdata['user']
                                                     ['screen_name'],
                                                     progposition))
                                                break  # Break out of this loop and back to check the same tweet against the next programme
                                            else:
                                                print(
                                                    "Duplicate tweet from user - ignoring"
                                                )
                    else:
                        print "Blank line received from Twitter - no new data"

                    print("Done!")  # new line to break up display
            else:
                time2.sleep(0.1)
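
The duplicate test is repeated verbatim in both keyword branches above; pulled out on its own it reads as follows. A minimal sketch, assuming a DB-API cursor with %s placeholders over the same rawdata table:

def is_duplicate(cursor, pid, text, user, timestamp, window=10):
    # Same text from the same user for this programme, or any tweet by
    # that user within a 10-second window, counts as a duplicate.
    cursor.execute(
        """SELECT * FROM rawdata WHERE (pid = %s AND text = %s AND user = %s)
           OR (pid = %s AND user = %s AND timestamp >= %s AND timestamp < %s)""",
        (pid, text, user, pid, user, timestamp - window, timestamp + window))
    return cursor.fetchone() is not None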
Example #47
0
 def MakeRevIndex(self, catrel, arch, osrel):
   key = (catrel, arch, osrel)
   if key in self.cached_catalogs_bins:
     return
   fn_bins = cache_file_bins % key
   fn_links = cache_file_links % key
   fn_needed_bins = cache_file_needed_bins % key
   if os.path.exists(fn_bins) and os.path.exists(fn_needed_bins) and os.path.exists(fn_links):
     with open(fn_bins, "r") as fd:
       self.cached_catalogs_bins[key] = cjson.decode(fd.read())
     with open(fn_links, "r") as fd:
       self.cached_catalogs_links[key] = cjson.decode(fd.read())
     with open(fn_needed_bins, "r") as fd:
       self.cached_catalogs_needed_bins[key] = cjson.decode(fd.read())
     return
   catalog = self.rest_client.GetCatalog(*key)
   bins = {}
   links = {}
   needed_bins = {}
   i = 0
   for pkg_simple in catalog:
     i = i+1
     cb = []
     cl = []
     nb = []
     # pprint.pprint(pkg_simple)
     md5 = pkg_simple["md5_sum"]
     pkg = self.cp.GetPkgstats(md5)
     if not pkg:
       logging.warning("MakeRevIndex: No package for %r", md5)
       continue
      try:
       pkg_name = pkg["basic_stats"]["pkgname"]
       for p in pkg['binaries_dump_info']:
         for b in p['needed sonames']:
           if b not in nb:
             nb.append(b)
       for b in pkg['binaries']:
           if b not in cb:
             cb.append(b)
           else:
              logging.debug("MakeRevIndex: %s already in cache", b)
       for pm in pkg['pkgmap']:
           if pm['type'] == 's': # symbolic link
             cl.append(pm['line'].split(' ')[3].split('=')[0]) # take the linkname
               
      except KeyError:
        logging.warning("MakeRevIndex: no pkg structure for %r", md5)
        # logging.warning(pkg)
        # Skip this package: pkg_name may not have been set yet, and a
        # partial record should not be stored.
        continue
     bins[pkg_name] = cb
     needed_bins[pkg_name] = nb
     links[pkg_name] = cl
     sys.stdout.write("\rMakeRevIndex:%4d %s" % (i,pkg_name))
     sys.stdout.flush()
   sys.stdout.write("\n")
   self.cached_catalogs_bins[key] = bins
   self.cached_catalogs_links[key] = links
   self.cached_catalogs_needed_bins[key] = needed_bins
    with open(fn_bins, "w") as fd:
      fd.write(cjson.encode(self.cached_catalogs_bins[key]))
    with open(fn_links, "w") as fd:
      fd.write(cjson.encode(self.cached_catalogs_links[key]))
    with open(fn_needed_bins, "w") as fd:
      fd.write(cjson.encode(self.cached_catalogs_needed_bins[key]))
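
The three cache files above all follow the same decode-if-present, otherwise build-and-encode pattern; a minimal sketch of it (the helper name load_or_build is hypothetical):

import os
import cjson

def load_or_build(path, build):
    # Return the cached JSON file if present, else build and cache it.
    if os.path.exists(path):
        with open(path, "r") as fd:
            return cjson.decode(fd.read())
    data = build()
    with open(path, "w") as fd:
        fd.write(cjson.encode(data))
    return data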
Example #48
0
 def serialize(cls, data):
     return cjson.encode(data)
Example #49
0
def dump_json(replay):
    try:
        from cjson import encode
    except ImportError:
        from simplejson import dumps as encode
    print encode(replay_to_dict(replay))
Example #50
0
 def setmessage(self, value):
     self.level = cjson.encode(value, True)
Example #51
0
    if key1 in removelist or key2 in removelist:
        continue
    cosine_dict[key1][key2] = value
    cosine_dict[key2][key1] = value
centrality_list = []
for key, value in cosine_dict.iteritems():
    centrality = 0
    #    print key,value
    for tag, freq in value.iteritems():
        if freq > 0.48:
            centrality += freq
    centrality_list.append([key, centrality])
a = sorted(centrality_list, key=itemgetter(1), reverse=1)
outfile1 = open(sys.argv[3], 'w')
for tag, cen in a:
    outfile1.write(cjson.encode([tag, cen]) + '\n')
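
A toy illustration of the centrality score written out above, on hypothetical data: a tag's centrality is the sum of its cosine similarities that exceed the threshold.

# Hypothetical stand-in for the real cosine_dict built earlier.
cosine_dict = {'rock': {'pop': 0.6, 'jazz': 0.3},
               'pop': {'rock': 0.6},
               'jazz': {'rock': 0.3}}
THRESHOLD = 0.48
centrality_list = [[tag, sum(f for f in nbrs.itervalues() if f > THRESHOLD)]
                   for tag, nbrs in cosine_dict.iteritems()]
# e.g. [['rock', 0.6], ['pop', 0.6], ['jazz', 0]]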

#algorithm
THRESHOLD = 0.48
graph_parent_dict = {}
graph_parent_dict_s = {}
G = nx.DiGraph()
#G.add_node('root')
#for tag,centrality in centrality_list:
b = a
rule = open(sys.argv[7], 'w')
for tag, cen in b:
    max_can_value = 0
    max_can = ''
    for node in G.nodes():
        similarity = cosine_dict[node].get(tag, 0)
Example #52
0
def populate_ipo_table():
    ipo_urls = IPOInfoUrl.query.all()

    known_unwrs = set()

    for url in ipo_urls:

        comp = Company.query.filter_by(symbol=url.symbol).first()
        if not comp:
            #        session.add(comp)
            #        session.commit()
            continue

        if HistoricalIPO.query.filter_by(company_id=comp.id).first():
            print "Data exists for:", url.symbol
            continue

    #    comp = get_company_overview(url.symbol)
    #    if not comp:
    #        log.warning("Cannot get company info for %s" % url.symbol)
    #        continue

        ipo_data = scrape_ipo(url.url)
        if ipo_data == {}:
            continue
        log.info("IPO data from NASDAQ.com:\n%s" % cjson.encode(ipo_data))
        underwriters = ipo_data["underwriters"]
        lead_underwriters = ipo_data["lead_underwriters"]
        del ipo_data["underwriters"]
        del ipo_data["lead_underwriters"]

        ipo_date = ipo_data["ipo_date"]
        try:
            month, day, year = [int(i) for i in ipo_date.split("/")]
            ipo_date = datetime.date(year, month, day).strftime("%Y%m%d")
            #ipo_data["ipo_date"] = datetime.date(year, month, day).strftime("%Y-%m-%d")
            ipo_data["ipo_date"] = datetime.date(year, month, day)
        except ValueError:
            log.error("Error in IPO date:%s" % url.symbol)
            continue

        ipo_data_dir = os.path.join(tickdata_dir, ipo_date)
        ipo_data_path = os.path.join(ipo_data_dir,
                                     "%s_markethours.csv.gz" % url.symbol)
        exist = False
        if os.path.exists(ipo_data_dir) and os.path.exists(ipo_data_path):
            exist = True
            log.info("IPO data found")
        else:
            request = {
                "command": "get",
                "symbol": url.symbol,
                "date": ipo_date,
                "gettrade": "true",
                "getquote": "true"
            }
            try:
                fetcher_caller = fetcher.FetcherCaller()
                fetcher_caller.set_request(cjson.encode(request))
                response = fetcher_caller.send_request()
                fetcher_caller.close()
            except:
                log.error("Unable to send fetch request")
                continue

            count_down = 60
            fetched = False
            while count_down > 0:
                if os.path.exists(ipo_data_path):
                    log.info("IPO data fetched: %s" % url.symbol)
                    fetched = True
                    time.sleep(5)
                    break
                time.sleep(1)
                count_down -= 1
            if not fetched:
                log.error("Unable to download data for %s" % url.symbol)

        if exist or fetched:
            itd = process_ipo_tick_data(url.symbol, ipo_date)
            ipo_data["open_vol"] = itd["open_vol"]
            ipo_data["first_opening_price"] = itd["first_opening_price"]
            ipo_data["first_closing_price"] = itd["first_closing_price"]
            ipo_data["first_trade_time"] = itd["first_trade_time"]
            ipo_data["first_day_high"] = itd["first_day_high"]
            ipo_data["first_day_low"] = itd["first_day_low"]
            ipo_data["first_day_high_percent_change"] = itd[
                "first_day_high_percent_change"]
            ipo_data["first_day_low_percent_change"] = itd[
                "first_day_low_percent_change"]
            ipo_data["first_day_volume"] = itd["first_day_volume"]
        else:
            ipo_data["open_vol"] = None
            ipo_data["first_opening_price"] = None
            ipo_data["first_closing_price"] = None
            ipo_data["first_trade_time"] = None
            ipo_data["first_day_high"] = None
            ipo_data["first_day_low"] = None
            ipo_data["first_day_high_percent_change"] = None
            ipo_data["first_day_low_percent_change"] = None
            ipo_data["first_day_volume"] = None

        ipo_data["scoop_rating"] = 0
        ipo_data["company_id"] = comp.id
        log.info("Final IPO data for %s:\n%s" % (url.symbol, ipo_data))
        """
        for u in underwriters:
            if u in known_unwrs:
                unwr = Underwriter.query.filter_by(name=u).first()
            else:
                unwr = Underwriter(u)
                known_unwrs.add(u)
            session.add(unwr)
            session.commit()
            a = CompanyUnderwriterAssociation(company_id=comp.id, underwriter_id=unwr.id, lead=False)
            comp.underwriters.append(a)
            session.commit()
    
        for u in lead_underwriters:
            if u in known_unwrs:
                unwr = Underwriter.query.filter_by(name=u).first()
            else:
                unwr = Underwriter(u)
                known_unwrs.add(u)
            session.add(unwr)
            session.commit()
            a = CompanyUnderwriterAssociation(company_id=comp.id, underwriter_id=unwr.id, lead=True)
            comp.underwriters.append(a)
            session.commit()
        """

        historical_ipo = HistoricalIPO(**ipo_data)
        session.add(historical_ipo)
        session.commit()
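
A worked example of the date normalisation performed above (input value hypothetical): the NASDAQ-style MM/DD/YYYY string becomes the YYYYMMDD form used to locate the tick-data directory.

import datetime

ipo_date = "07/19/2013"  # hypothetical scraped value
month, day, year = [int(i) for i in ipo_date.split("/")]
assert datetime.date(year, month, day).strftime("%Y%m%d") == "20130719"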
Example #53
0
                    probs) - posterior_normalizer

        best_genotype_combo = genotype_combo_probs[0][0]
        best_genotype_combo_prob = genotype_combo_probs[0][1]

        #best_genotype_probability = math.exp(sum([prob for name, (genotype, prob) in best_genotype_combo]) \
        #        + allele_frequency_probabilityln(count_frequencies([genotype for name, (genotype, prob) in best_genotype_combo])) \
        #        - posterior_normalizer)
        best_genotype_probability = math.exp(best_genotype_combo_prob -
                                             posterior_normalizer)
        position['best_genotype_combo'] = [[
            name,
            genotype_str(genotype),
            math.exp(marginals[name][genotype_str(genotype)])
        ] for name, (genotype, prob) in best_genotype_combo]
        position['best_genotype_combo_prob'] = best_genotype_probability
        position['posterior_normalizer'] = math.exp(posterior_normalizer)
        position['combos_tested'] = combos_tested
        #position['genotype_combo_probs'] = genotype_combo_probs
        # TODO estimate marginal probabilities of genotypings
        # here we cast everything into float-space
        for samplename, sample in samples.items():
            sample['genotypes'] = sorted(
                [[genotype_str(genotype),
                  math.exp(prob)] for genotype, prob in sample['genotypes']],
                key=lambda c: c[1],
                reverse=True)

        print cjson.encode(position)
        #print position['position']
Example #54
0
cnt = 0
for key, value in bb.iteritems():
    xxx = sorted(value.items(), key=itemgetter(1), reverse=1)
    prb = sorted(bbb[key].items(), key=itemgetter(1), reverse=1)
    print >> out1, key, xxx, prb
    if len(xxx) >= 2 and xxx[0][1] == xxx[1][1] and xxx[0][1] >= 3:
        #print>>out1,key,xxx,prb
        eqlist = []
        for item in xxx:
            if xxx[0][1] == item[1]:
                print bbb[key], item[0]
                eqlist.append([item[0], bbb[key][item[0]]])
        master = sorted(eqlist, key=itemgetter(1), reverse=1)[0]
        master.append(xxx[0][1])
        bb1[key] = master

    elif len(xxx) == 1 and xxx[0][1] >= 3:
        bb1[key] = xxx[0]
    elif len(xxx) >= 2 and xxx[0][1] != xxx[1][1] and xxx[0][1] >= 3:
        bb1[key] = xxx[0]
    else:
        pass
    if ' ' not in key:
        cnt += 1
    else:
        print '--', key, xxx
print len(bb1)  #with phrase
print cnt  #without phrase
outfile = open('backbone-3', 'w')
outfile.write(cjson.encode(bb1))
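
A toy run of the tie-break above: among candidates tied at the top count (at least 3), the one with the highest probability wins, and the shared count is appended. The data here is hypothetical; probs stands in for bbb[key].

from operator import itemgetter

xxx = [('rock', 4), ('pop', 4), ('jazz', 1)]    # counts, sorted descending
probs = {'rock': 0.2, 'pop': 0.7, 'jazz': 0.1}  # stands in for bbb[key]
eqlist = [[t, probs[t]] for t, c in xxx if c == xxx[0][1]]
master = sorted(eqlist, key=itemgetter(1), reverse=True)[0]
master.append(xxx[0][1])
# master == ['pop', 0.7, 4]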
Example #55
0
def initconfig(agi, cursor, args):  # pylint: disable-msg=W0613
    """
    Provisioning by code on keypad.
    """
    sip_uri = args[0]
    code = args[1]
    ua = args[2]
    isinalan = 1

    # Get Sip User, IPv4 and Mac Address
    user_ipv4 = user_ipv4_from_sip_uri(sip_uri)
    if not user_ipv4:
        send_error(agi, "Could not parse Sip URI \"%s\"" % sip_uri)
        return
    sip_user, ipv4 = user_ipv4  # pylint: disable-msg=W0612
    macaddr = xivo_config.macaddr_from_ipv4(ipv4)  # XXX, agi_session.verbose)
    if not macaddr:
        send_error(agi, "Could not find Mac Address from IPv4 \"%s\"" % ipv4)
        return

    # Get Phone description (if we are able to handle this vendor...)
    phone_desc = xivo_config.phone_desc_by_ua(ua)  # XXX, agitb.handler)
    if not phone_desc:
        send_error(agi, "Unknown UA %r" % (ua, ))
        return
    phone_vendor = phone_desc[0]
    phone_model = phone_desc[1]

    if code == 'init':
        code = '0'
    if not xivo_config.well_formed_provcode(code):
        send_error(agi, "Badly formed provisioning code", "privacy-incorrect")
        return
    code = int(code)

    command = {
        'mode': 'authoritative',
        'vendor': phone_vendor,
        'model': phone_model,
        'macaddr': macaddr,
        'ipv4': ipv4,
        'provcode': code,
        'actions': 'yes',
        'proto': 'sip',
        'isinalan': isinalan,
    }

    try:
        socket.setdefaulttimeout(float(Pgc['http_request_to_s']))
        conn = httplib.HTTPConnection(Pgc['connect_ipv4'] + ':' +
                                      str(Pgc['connect_port']))
        conn.request("POST", "/provisioning", cjson.encode(command),
                     {"Content-Type": "application/json"})
        response = conn.getresponse()
        response.read()  # eat every data sent by the provisioning server
        conn.close()
        reason = response.reason
        status = response.status
    except Exception, xcept:
        reason = str(xcept)
        status = 500
        agitb.handler()
        del xcept
        sys.exc_clear()
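
What the try block above puts on the wire is just the command dict serialised with cjson and posted as application/json; a minimal reproduction (host, port and payload values are hypothetical):

import httplib
import cjson

body = cjson.encode({'mode': 'authoritative', 'proto': 'sip', 'provcode': 0})
conn = httplib.HTTPConnection('127.0.0.1:8667')  # hypothetical provisioning server
conn.request("POST", "/provisioning", body,
             {"Content-Type": "application/json"})
response = conn.getresponse()
response.read()  # drain the body, as above, before closing
conn.close()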
Example #56
0
                        save = True
                    except IOError, e:
                        print ("Failed to load config file - not saving oauth keys: " + str(e))

                    if save:
                        raw_config = file.read()

                        file.close()

                        # Read config and add new values
                        config = cjson.decode(raw_config)
                        config['key'] = access_token['oauth_token']

                        config['secret'] = access_token['oauth_token_secret']

                        raw_config = cjson.encode(config)

                        # Write out the new config file
                        try:
                            file = open(homedir + "/twitter-login.conf",'w')
                            file.write(raw_config)
                            file.close()
                        except IOError, e:
                            print ("Failed to save oauth keys: " + str(e))

                    self.keypair = [access_token['oauth_token'], access_token['oauth_token_secret']]
        

        while not self.finished():
            # TODO: Implement backoff algorithm in case of connection failures - watch out for the fact this could delay the requester component
            if self.dataReady("inbox"):
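
The truncated block above is a read-modify-write of a JSON config file; its core, as a sketch using the same keys (the helper name is hypothetical, error handling elided):

import cjson

def update_oauth_keys(path, token, secret):
    # Decode the existing config, overwrite the oauth keys, write it back.
    with open(path) as f:
        config = cjson.decode(f.read())
    config['key'] = token
    config['secret'] = secret
    with open(path, 'w') as f:
        f.write(cjson.encode(config))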
Example #57
0
File: nmsg.py Project: rainfly123/fuwav2
 def get(self):
     data = mysql.Query()
     self.write(cjson.encode(data))
Example #58
0
import cjson
from operator import itemgetter

infile = '/spare/wei/user_tag_dict_sift_1'
userlist = []
for line in open(infile, 'r'):
    # line = line.split('\t')[1]
    userlist.append(cjson.decode(line))
aaa = sorted(userlist, key=itemgetter(2), reverse=True)
user_tag_coverage = []
user_tag_cnt = []
tags = []
last_user_sum = 0
for ls in aaa:
    user_tag_cnt.append([ls[0], len(ls[1])])
    num_new = 0
    for key, value in ls[1].iteritems():
        if key not in tags:
            num_new += 1
            tags.append(key)
    user_tag_coverage.append([ls[0], num_new, num_new + last_user_sum])
    last_user_sum = num_new + last_user_sum
outfile = open('/spare/wei/user_tag_coverage', 'w')
outfile.write(cjson.encode(user_tag_coverage) + '\n')
outfile.write(cjson.encode(user_tag_cnt) + '\n')
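
An equivalent coverage computation on toy data, using a set for O(1) membership instead of the list above; ls[1] in the script is a {tag: value} dict per user.

seen = set()
coverage = []
running_total = 0
for user, tagdict in [('u1', {'a': 1, 'b': 2}), ('u2', {'b': 1, 'c': 3})]:
    num_new = len(set(tagdict) - seen)  # tags this user adds
    seen.update(tagdict)
    running_total += num_new
    coverage.append([user, num_new, running_total])
# coverage == [['u1', 2, 2], ['u2', 1, 3]]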

Example #59
0
 def __str__(self):
     return cjson.encode(self.attributes)
Example #60
0
File: nmsg.py Project: rainfly123/fuwav2
 def get(self):
     user = self.get_argument("userid", strip=True)
     data = mysql.Myquery(user)
     self.write(cjson.encode(data))