Exemple #1
1
 def update_queue(self, queue_name, options=None):
     """Patch a queue's settings on the server and return the updated queue.

     ``options`` (dict or None) becomes the ``queue`` payload; when omitted,
     an empty JSON object is sent instead.
     """
     payload = {} if options is None else {"queue": options}
     response = self.client.patch(
         "queues/%s" % queue_name,
         body=json.dumps(payload),
         headers={"Content-Type": "application/json"},
     )
     return response["body"]["queue"]
 def get(self, action=None):
     """Serve retention info as CORS-enabled JSON.

     With no ``action``, return the saved retention segment list; with
     ``action == "data"``, query the analytics service for retention data
     built from the request's query parameters.
     """
     # Permissive CORS headers so the dashboard can call this from any origin.
     for header, value in (
             ('Access-Control-Allow-Origin', '*'),
             ('Access-Control-Allow-Methods', 'POST, GET, OPTIONS'),
             ('Access-Control-Max-Age', 1000),
             ('Access-Control-Allow-Headers',
              'Authorization, Origin, X-Requested-With, Content-Type, Accept'),
             ('Content-type', 'application/json'),
     ):
         self.set_header(header, value)
     if action is None:
         # Saved retention segment list.
         self.write(json.dumps({"id": 1, "label": "first segment"}))
     elif action == "data":
         # Collect the retention query parameters (with their defaults).
         start_date = self.get_argument("startDate", None)
         end_date = self.get_argument("endDate", None)
         unit = self.get_argument("unit", "day")
         return_event = self.get_argument("returnEvent", "any event")
         born_event = self.get_argument("bornEvent", 'request demo page')
         interval_count = self.get_argument("intervalCount", 10)
         data_as = self.get_argument("dataAs", 'number')
         by = self.get_argument("by", "")
         where = self.get_argument("where", "")
         viz = self.get_argument("viz", "bar")
         # "any event" is the UI's wildcard; the service expects None.
         if return_event == 'any event':
             return_event = None
         retention_data = self.analytics.get_retention_data(
             born_event, start_date, end_date, unit, int(interval_count),
             data_as, return_event, by, where, viz)
         self.write(json.dumps(retention_data))
Exemple #3
0
def push_notification(request):
    """Send an APNS push notification to the requested device keys.

    Expects a POST whose body is a JSON object with ``token`` (our API
    token), ``device_key`` (a list of APNS registration ids) and
    ``message``. Devices that are not yet registered are created on the
    fly before sending. Always replies with a JSON ``result`` envelope.
    """
    if request.method != 'POST':
        result = {'result': 'This is not the correct method use POST method instead GET method!'}
        return HttpResponse(json.dumps(result), content_type='application/json')
    # SECURITY FIX: the body is untrusted input -- parse it as JSON instead
    # of eval()'ing it (the original eval() allowed arbitrary code execution).
    try:
        data = json.loads(request.body)
    except ValueError:
        data = {}
    # dict.has_key() was removed in Python 3; use the 'in' operator.
    if 'device_key' not in data or 'token' not in data or 'message' not in data:
        result = {'result': 'Please pass the token provided by us along with the Device Keys and message!'}
        return HttpResponse(json.dumps(result), content_type='application/json')
    try:
        pem_certificate = P12Certificate.objects.get(emb_token=data['token'])
    except P12Certificate.DoesNotExist:
        result = {'result': 'Please check your token number!'}
        return HttpResponse(json.dumps(result), content_type='application/json')
    for device_key in data['device_key']:
        # Register unknown devices before sending to them.
        try:
            device = APNSDevice.objects.get(registration_id=device_key)
        except APNSDevice.DoesNotExist:
            device = APNSDevice.objects.create(registration_id=device_key)
        device.save()
        # The message may only be sent as text.
        device.send_message(data['message'], certificate=pem_certificate.pem_file)

    result = {'result': 'successfully sent the push notification!'}
    return HttpResponse(json.dumps(result), content_type='application/json')
Exemple #4
0
 def test_query(self):
     """Submit VALID_QUERY as a job, poll it to completion, and dump page 1."""
     job = self._api.new_job(self.VALID_QUERY)
     print(json.dumps(job))
     job_id = job["job_id"]
     status = self._api.check_job_loop(job_id)
     print(json.dumps(status))
     print(json.dumps(self._api.get_results(job_id, 1)))
Exemple #5
0
def migrate_rooms(src_rooms, dst_rooms, to_migrate_isps, username):
    """Start migration threads moving ISP traffic off ``src_rooms``.

    Merges rooms already flagged abnormal per ISP into the source set,
    spawns one MigrateThread per ISP that has eligible domains, records
    the run in the migrate history, and returns the history info.
    Raises BadParam when no domain can be migrated.
    """
    history_id = MigrateDal.create_migrage_history(username)

    # Rooms already marked abnormal for an ISP are migrated alongside.
    abnormal_rooms_by_isp = MigrateDal.get_all_abnormal_isps(key='isp', value='room')
    isp_to_rooms = {
        isp: set(src_rooms) | abnormal_rooms_by_isp.get(isp, set())
        for isp in to_migrate_isps
    }

    domains_by_isp = MigrateDal.list_migrate_domain_by_isp(isp_to_rooms, dst_rooms)

    started_any = False
    for isp, domains in domains_by_isp.items():
        # Only domains that still have rooms enabled afterwards qualify.
        domains = [d for d in domains if d['after_enabled_rooms']]
        if not domains:
            continue
        started_any = True
        MigrateDal.update_history_total(history_id, len(domains))
        MigrateThread(username, history_id, domains).start()

    if not started_any:
        MigrateDal.delete_history_by_id(history_id)
        raise BadParam("no domain can migrate, isp_rooms: %s"
                       % isp_to_rooms, msg_ch=u'没有可迁移的机房')

    MigrateDal.add_batch_abnormal_isp(username, isp_to_rooms)
    MigrateDal.update_history_by_id(history_id,
                                    migrate_rooms=json.dumps(src_rooms),
                                    migrate_isps=json.dumps(to_migrate_isps),
                                    dst_rooms=json.dumps(dst_rooms),
                                    migrate_info=json.dumps({}))

    return get_migrate_info(history_id)
Exemple #6
0
    def loadDatasetToMLDB(cls):
        """(Re)create the sparse test dataset and load two rows into MLDB.

        Row ``r1`` has only ``col1``; row ``r2`` has both columns, giving
        a null/missing cell for the tests to probe:

            |     | col1 | col2 |
            | r1  |  1   |      |
            | r2  |  1   |  2   |
        """
        dataset_name = "null_column_test"
        cls.url = "http://localhost:%d/v1" % cls.port
        cls.dataset_url = cls.url + "/datasets/" + dataset_name
        # Drop any leftover dataset from a previous run.
        requests.delete(cls.dataset_url)

        # Register the dataset.
        requests.post(cls.url + "/datasets", data=json.dumps({
            "type": "sparse.mutable",
            "id": dataset_name,
        }))

        # Fill the data -- a single timestamp is shared by every cell.
        now = datetime.datetime.now().isoformat(' ')
        rows = [
            ("r1", [['col1', 1, now]]),
            ("r2", [['col1', 1, now], ['col2', 2, now]]),
        ]
        for row_name, cols in rows:
            requests.post(cls.dataset_url + "/rows",
                          json.dumps({"rowName": row_name, "columns": cols}))

        # Commit the dataset.
        requests.post(cls.dataset_url + "/commit")
def edit_plan(request, access_token, planid):
    """Update an existing plan owned by the authenticated Facebook user.

    Reads new field values from POST (falling back to the plan's current
    values), rebuilds the per-friend visibility entries when
    ``visible_type`` is 3, and returns a JSON result envelope.

    BUG FIX: the exception handler previously mixed spaces into an
    otherwise tab-indented function, a TabError under Python 3; the whole
    body is normalized to spaces.
    """
    result = login_auth(access_token)
    if result['err']['code'] != 0:
        return HttpResponse(json.dumps(result))
    userid = result['data']['id']
    try:
        plan = Plan.objects.get(id__exact=planid)
        user = FBUser.objects.get(fbid=userid)
        # Per-friend visibility entries are rebuilt from scratch below.
        PrivatePlan.objects.filter(accessible_plan=plan).delete()
        plan.holder = user
        plan.title = request.POST.get('title', plan.title)
        plan.destination = request.POST.get('destination', plan.destination)
        plan.description = request.POST.get('description', plan.description)
        plan.depart_time = request.POST.get('depart_time', plan.depart_time)
        plan.length = request.POST.get('length', plan.length)
        plan.limit = request.POST.get('limit', plan.limit)
        visible_type = request.POST.get('visible_type', plan.visible_type)
        plan.visible_type = int(visible_type)
        friend_list = request.POST.getlist('friendlist', [])
        plan.full_clean()
        plan.save()
        if plan.visible_type == 3:
            # Type 3 restricts visibility to the listed friends.
            for friendid in friend_list:
                friend = FBUser.objects.get(fbid=friendid)
                private = PrivatePlan()
                private.accessible_user = friend
                private.accessible_plan = plan
                private.full_clean()
                private.save()
        result = format(0, 'edit success')
        return HttpResponse(json.dumps(result))
    except Exception as e:
        result = format(400, str(e))
        return HttpResponse(json.dumps(result))
Exemple #8
0
def handle(data, con, apikey=None):
  """Dispatch one JSON command message and return the JSON reply string.

  ``data`` is the raw JSON request, ``con`` the caller's connection
  context passed through to the handler, and ``apikey`` (optional)
  enables key authentication. Unknown commands fall back to ``noop``.

  BUG FIX: dict.has_key() and unicode() are Python 2 only; replaced with
  the 'in' operator / dict.get() and str() so the code also runs on
  Python 3.
  """
  d = json.loads(data)

  # Command name -> handler callable.
  handlers = {'import': importit, 'ping': ping,
      'listimported': listimported, 'slice': sliceit,
      'listdone': listdone, 'getdone': getdone,
      'importconfig': importconfig, 'listconfig': listconfigs,
      'listprogress': listprogress, 'getstats': getstats,
      'journal': getjournal, 'del': wipefile, 'wait': waitfor}

  # Missing or unknown commands are treated as a no-op.
  hndlr = noop
  cmd = 'noop'
  if d.get('cmd') in handlers:
    cmd = d['cmd']
    hndlr = handlers[cmd]

  logging.info('cmd: ' + cmd)

  if apikey is not None:
    if d.get('key') != apikey:
      logging.info('authentication failed for "{}" key!'.format(
        d.get('key', '')))
      return json.dumps({'r': 'fail',
        'm': 'authentication failed. incorrect apikey'})

  try:
    r = hndlr(d, con)
    result = json.dumps(r)
  except Exception as e:
    logging.error(str(e))
    result = json.dumps({'m': str(e), 'r': 'fail'})
  logaccess(len(data), len(result), cmd, con)

  return result
Exemple #9
0
def main():
    """Parse key=value CLI args from a file and run the chosen action.

    sys.argv[1] names a file of shell-style ``key=value`` tokens;
    recognized keys are ``worker`` (looked up in ``workers``) and
    ``action`` (create/teardown/display on WorkItemList). Prints a JSON
    summary when done.

    BUG FIX: the Python 2-only file() builtin and print statement are
    replaced with open() (in a context manager, so the handle is closed)
    and the print() function.
    """
    args_file = sys.argv[1]
    with open(args_file) as f:
        args_data = f.read()
    arguments = shlex.split(args_data)
    worker = 'all'
    action = WorkItemList.display

    # Named actions the command line may select.
    actions = {
        'create': WorkItemList.create,
        'teardown': WorkItemList.teardown,
        'display': WorkItemList.display,
    }
    for arg in arguments:
        # Ignore any arguments without an equals sign in them.
        if '=' not in arg:
            continue
        key, value = arg.split('=')
        if key == 'worker':
            worker = workers[value]
        elif key == 'action':
            action = actions.get(value, action)

    logging.basicConfig(level=logging.ERROR)

    action(worker)
    print(json.dumps({
        'success': True,
        'args': args_data
    }))
Exemple #10
0
def loggedIn():
  """Return a JSON flag saying whether the session holds live credentials.

  Logged in means: the session stores credentials AND their access token
  has not expired.
  """
  creds_json = flask.session.get('credentials')
  logged_in = (
      creds_json is not None
      and not client.OAuth2Credentials.from_json(creds_json).access_token_expired
  )
  return json.dumps({"loggedIn": logged_in})
Exemple #11
0
    def post(self, *args, **kwargs):
        """Build a conveyance composition from the posted JSON document.

        On validation (or any other) failure, reply 401 with a JSON
        ``error`` payload and finish the request; otherwise write the
        composed result as JSON.
        """
        self.set_header("Content-Type", "application/json")

        try:
            data = json.loads(self.request.body.decode("utf8"))
            conv = Conveyance(data)
            compose = conv.compose()(conv.definitions, conv.resources)
        except (ValidationError, jsonschema.ValidationError) as e:
            self.set_status(401)
            self.set_header('WWW-Authenticate', 'Basic realm="something"')
            self.write(json.dumps({"error": str(e)}))
            raise tornado.web.Finish()
        except Exception:
            # BUG FIX: the original bare 'except:' put the exception *class*
            # (sys.exc_info()[0]) into the payload, which json.dumps cannot
            # serialize; report the exception message string instead.
            self.set_status(401)
            self.set_header('WWW-Authenticate', 'Basic realm="something"')
            self.write(json.dumps({"error": str(sys.exc_info()[1])}))
            raise tornado.web.Finish()

        self.write(json.dumps(compose))
Exemple #12
0
def send_data(save_path):
    """
    Sends all the data files that are present in the specified path to the Qbike server.
    :param save_path: Requires the path in which the trips are saved.
    :return: Nothing. The data is sent to the Server and the txt files are removed from the path's directory.
    """
    # BUG FIX: the loop sentinel previously tested a hard-coded
    # 'C:\\Users\\...' literal -- whose '\U' is a SyntaxError in a non-raw
    # Python 3 string -- instead of the directory we were asked to scan.
    # Keep sending while the lowest-numbered trip file is still present.
    while os.path.isfile(os.path.join(save_path, "Trip1.txt")):
        # Find the highest-numbered trip file still on disk.
        for nb in reversed(range(0, 100)):
            trip_nb = str(nb)
            if os.path.isfile(os.path.join(save_path, "Trip" + trip_nb + ".txt")):
                break

        trip_path = os.path.join(save_path, "Trip" + trip_nb + ".txt")

        with open(trip_path, "r") as trip_file:
            batch = json.load(trip_file)

        # Hand the batch to the server over socket.io, waiting briefly for acks.
        info = {'purpose': 'batch-sender', 'groupID': "cwa2", 'userID': ID}
        socketIO = SocketIO('dali.cs.kuleuven.be', 8080)
        socketIO.on('server_message', on_response)
        socketIO.emit('start', json.dumps(info), on_response)
        socketIO.wait(2)
        socketIO.emit('batch-tripdata', json.dumps(batch), on_response)
        socketIO.wait(5)

        os.remove(trip_path)

    print("Sent Data")
    def setUp(self):
        """Register a test user and PUT four activity-state documents.

        Stores each request's params, state document, and response on
        ``self`` as testparamsN / teststateN / putN for the tests to use.
        """
        self.username = "******"
        self.email = "mailto:[email protected]"
        self.password = "******"
        self.auth = "Basic %s" % base64.b64encode("%s:%s" % (self.username, self.password))
        form = {'username': self.username, 'email': self.email,
                'password': self.password, 'password2': self.password}
        response = self.client.post(reverse(views.register), form,
                                    X_Experience_API_Version="1.0.0")

        # (index, state-id, activity-id, agent, state-document)
        cases = [
            (1, self.stateId, self.activityId, self.testagent,
             {"test": "put activity state 1", "obj": {"agent": "test"}}),
            (2, self.stateId2, self.activityId, self.testagent,
             {"test": "put activity state 2", "obj": {"agent": "test"}}),
            (3, self.stateId3, self.activityId2, self.testagent,
             {"test": "put activity state 3", "obj": {"agent": "test"}}),
            (4, self.stateId4, self.activityId2, self.otheragent,
             {"test": "put activity state 4", "obj": {"agent": "other"}}),
        ]
        for i, state_id, activity_id, agent, state_doc in cases:
            params = {"stateId": state_id, "activityId": activity_id, "agent": agent}
            path = '%s?%s' % (self.url, urllib.urlencode(params))
            put_response = self.client.put(path, json.dumps(state_doc),
                                           content_type=self.content_type,
                                           Authorization=self.auth,
                                           X_Experience_API_Version="1.0.0")
            setattr(self, 'testparams%d' % i, params)
            setattr(self, 'teststate%d' % i, state_doc)
            setattr(self, 'put%d' % i, put_response)
 def _get_field_values(self):
     """Collect vocabulary and resource values used to populate filter fields.

     Returns a dict mapping field names (anpa_category, genre, urgency,
     priority, type, subject, desk, stage, sms, embargo, place,
     ingest_provider) to their selectable items.
     """
     values = {}
     vocabs = get_resource_service('vocabularies')

     values['anpa_category'] = vocabs.find_one(req=None, _id='categories')['items']

     # 'genre' may be stored under its own id or as a schema field.
     genre_req = ParsedRequest()
     genre_req.where = json.dumps({'$or': [{"schema_field": "genre"}, {"_id": "genre"}]})
     genre = vocabs.get(req=genre_req, lookup=None)
     if genre.count():
         values['genre'] = genre[0]['items']

     for field in ('urgency', 'priority', 'type'):
         values[field] = vocabs.find_one(req=None, _id=field)['items']

     subject = vocabs.find_one(req=None, schema_field='subject')
     values['subject'] = subject['items'] if subject else get_subjectcodeitems()

     values['desk'] = list(get_resource_service('desks').get(None, {}))
     values['stage'] = self._get_stage_field_values(values['desk'])

     values['sms'] = [{'qcode': 0, 'name': 'False'}, {'qcode': 1, 'name': 'True'}]
     values['embargo'] = [{'qcode': 0, 'name': 'False'}, {'qcode': 1, 'name': 'True'}]

     # 'place' has historically also lived under the 'locators' id.
     place_req = ParsedRequest()
     place_req.where = json.dumps(
         {'$or': [{"schema_field": "place"}, {"_id": "place"}, {"_id": "locators"}]})
     place = vocabs.get(req=place_req, lookup=None)
     if place.count():
         values['place'] = place[0]['items']

     values['ingest_provider'] = list(get_resource_service('ingest_providers').get(None, {}))
     return values
Exemple #15
0
def markit(request, f_id):
    """Create or update the current user's mark for film ``f_id``.

    POST only; returns JSON with the user's mark and the film's new
    average, or redirects to the film page for other methods. Invalid
    forms get a JSON "Not valid" response.
    """
    if request.method != 'POST':
        return redirect('/plitka/filmbody/%s' % f_id)
    mark_form = MarkForm(request.POST)
    if not mark_form.is_valid():
        return HttpResponse(json.dumps({"code": 0, "response": "Not valid"}),
                            content_type="application/json")
    film = Film.objects.get(pk=f_id)
    user = auth.get_user(request)
    # Update the existing mark if there is one, otherwise create it.
    # (The original duplicated the identical response-building code in
    # both branches; it is consolidated here.)
    try:
        mark = Mark.objects.get(user=user, film=film)
        mark.number = mark_form.cleaned_data['number']
    except Mark.DoesNotExist:
        mark = Mark(user=user, film=film, number=mark_form.cleaned_data['number'])
    mark.save()
    response_data = {
        "code": 0,
        "response": "OK",
        "your_mark": mark.number,
        "average_mark": Mark.objects.average_mark(f_id),
    }
    return HttpResponse(json.dumps(response_data),
                        content_type="application/json")
Exemple #16
0
	def __authCall(self, call, params={}, item="BTC", currency=""):
		"""Send an authenticated call over the MtGox streaming API socket.

		Builds the inner call payload, signs it with the account's API
		secret, wraps it in an ``op: call`` envelope and sends it over
		the socket. Returns the request id, which the caller can use to
		match the asynchronous reply.

		NOTE(review): ``params={}`` is a mutable default argument; it is
		only read here, but confirm no caller mutates it.
		"""
		# https://en.bitcoin.it/wiki/MtGox/API/Streaming#Authenticated_commands
		# If 'Invalid call' remark is received, this is probably due to a bad nonce.
		nonce =  self.__getNonce()
		# Request id = MD5 of the nonce; used to correlate replies.
		requestId = get_hex_md5(str(nonce))
		requestDict = {
				"id":requestId,
				"call":call,
				"nonce":nonce,
				"params":params,
				"item":item,
				"currency":currency,
				}
		request = json.dumps(requestDict)

		# https://en.bitcoin.it/wiki/MtGox/API/HTTP
		# Envelope 'call' field = base64(binary api key + signature + payload).
		# NOTE(review): this '+' concatenation relies on Python 2 str/bytes
		# semantics -- verify before porting to Python 3.
		binaryKey = apikey_as_binary(self.__apiKey)
		signature = sign_request(request, self.__apiSecret)
		requestDict = {
				"op":"call",
				"id":requestId,
				"call":base64.b64encode(binaryKey + signature + request),
				"context":"mtgox.com",
				}
		msg = json.dumps(requestDict)
		self.send(msg, False)
		return requestId
    def get(self):
        """Write team info (optionally including events) as JSON, or 404."""
        self._validate_tba_app_id()
        team_key = self.request.get('team')
        year = self.request.get('year')

        response_json = {}

        try:
            response_json = ApiHelper.getTeamInfo(team_key)
            if self.request.get('events'):
                response_json = ApiHelper.addTeamEvents(response_json, year)

            # TODO: matches

            self.response.out.write(json.dumps(response_json))

            # Track the call, tagging the year when one was given.
            tracked_key = team_key if not year else team_key + ' (' + year + ')'
            self._track_call_defer('teams/details', tracked_key)

        except IndexError:
            response_json = {"Property Error": "No team found for the key given"}
            self.response.set_status(404)
            self.response.out.write(json.dumps(response_json))
Exemple #18
0
def comment_it(request, f_id):
    """Attach a new comment to film ``f_id`` and echo it back as JSON.

    POST only; other methods redirect to the film page. The comment text
    is HTML-escaped before storage and in the response.
    """
    user = auth.get_user(request)
    if request.method != 'POST':
        return redirect('/plitka/filmbody/%s' % f_id)
    comment_form = CommentForm(request.POST)
    if not comment_form.is_valid():
        return HttpResponse(json.dumps({"code": 0, "response": "Not valid"}),
                            content_type="application/json")
    escaped_text = html.escape(comment_form.cleaned_data['text'])
    comment = Comment(text=escaped_text, author=user,
                      film=Film.objects.get(pk=f_id))
    comment.save()
    payload = {
        "code": 0,
        "response": "OK",
        "email": user.email,
        "text": escaped_text,
        "c_id": comment.id,
        "f_id": f_id,
        "is_staff": user.is_staff,
    }
    return HttpResponse(json.dumps(payload), content_type="application/json")
    def test_ng_save_update(self):
        """POSTing an existing pk/slug updates the record; slug clashes 400."""
        # CRUDTestViewWithFK: update record 1 by pk, then read it back.
        update_req = self.factory.post('/crud/?pk=1',
                                       data=json.dumps({'pk': 1, 'name': 'John2'}),
                                       content_type='application/json')
        update_resp = CRUDTestView.as_view()(update_req)
        self.assertEqual(json.loads(update_resp.content.decode('utf-8'))['name'], 'John2')

        read_req = self.factory.get('/crud/?pk=1')
        read_resp = CRUDTestView.as_view()(read_req)
        self.assertEqual(json.loads(read_resp.content.decode('utf-8'))['name'], 'John2')

        # CRUDTestViewWithSlug: update by slug, then read it back.
        slug_update_req = self.factory.post('/crud/?pk=1',
                                    data=json.dumps({'name': 'John', 'email': '*****@*****.**'}),
                                    content_type='application/json')
        slug_update_resp = CRUDTestViewWithSlug.as_view()(slug_update_req)
        slug_data = json.loads(slug_update_resp.content.decode('utf-8'))
        self.assertEqual(slug_data['name'], 'John')
        self.assertEqual(slug_data['email'], '*****@*****.**')

        slug_read_req = self.factory.get('/crud/[email protected]')
        slug_read_resp = CRUDTestViewWithSlug.as_view()(slug_read_req)
        self.assertEqual(json.loads(slug_read_resp.content.decode('utf-8'))['name'], 'John')

        # Changing a record's slug to one that already exists must fail.
        conflict_req = self.factory.post('/crud/?pk=3',  # Modifying "Chris"
                                    data=json.dumps({'pk': 4, 'name': 'John2', 'email': '*****@*****.**'}),
                                    content_type='application/json')
        conflict_resp = CRUDTestViewWithSlug.as_view()(conflict_req)
        self.assertGreaterEqual(conflict_resp.status_code, 400)
        conflict_data = json.loads(conflict_resp.content.decode('utf-8'))
        self.assertTrue('detail' in conflict_data and 'email' in conflict_data['detail'] and len(conflict_data['detail']['email']) > 0)
 def pretty_print(self, data):
     """Pretty-print ``data`` (or the last response body) as indented JSON.

     Falls back to ``self.response_body`` when ``data`` is falsy, and
     prints a placeholder message when neither is available.

     BUG FIX: the Python 2-only print statements are replaced with the
     parenthesized print() form, which works on both Python 2 and 3.
     """
     if data:
         print(json.dumps(data, indent=4, separators=(',', ':')))
     elif self.response_body:
         print(json.dumps(self.response_body, indent=4, separators=(',', ':')))
     else:
         print("No data to print...")
Exemple #21
0
def main(argv):
    """Authorize against Cloud Storage, upload a file, and dump the response.

    BUG FIX: the body mixed Python 2-only print statements and raw_input
    with wildly inconsistent indentation; normalized to Python 3 syntax
    and 4-space indents. The unused ``fields_to_return`` local is removed.
    """
    flags = parser.parse_args(argv[1:])

    # sample.dat stores the short-lived access tokens so the user need not
    # re-validate through the browser every time. If the credentials don't
    # exist or are invalid, run through the native client flow; on success
    # the Storage object writes good credentials back to the file.
    storage = file.Storage('sample.dat')
    credentials = storage.get()
    if credentials is None or credentials.invalid:
        credentials = tools.run_flow(FLOW, storage, flags)

    # Create an httplib2.Http object to handle our HTTP requests and
    # authorize it with our good credentials.
    http = httplib2.Http()
    http = credentials.authorize(http)

    # Construct the service object for interacting with the Cloud Storage API.
    service = discovery.build('storage', _API_VERSION, http=http)

    starting_time = time.time()
    fileupload = input("please enter the name of the file")

    req = service.objects().insert(
        bucket=_BUCKET_NAME,
        name=fileupload, media_body='all_month.csv')
    resp = req.execute()
    # Report how long the upload round-trip took.
    ending_time = time.time() - starting_time
    print(ending_time)

    print(json.dumps(resp, indent=2))
    def test_ng_save_create(self):
        """POSTing without a pk creates records; duplicate slugs are rejected."""
        # CRUDTestViewWithFK: create, then fetch by the returned pk.
        create_req = self.factory.post('/crud/',
                                       data=json.dumps({'name': 'Leonard'}),
                                       content_type='application/json')
        create_resp = CRUDTestView.as_view()(create_req)
        new_pk = json.loads(create_resp.content.decode('utf-8'))['pk']

        read_req = self.factory.get('/crud/?pk={0}'.format(new_pk))
        read_resp = CRUDTestView.as_view()(read_req)
        self.assertEqual(json.loads(read_resp.content.decode('utf-8'))['name'], 'Leonard')

        # CRUDTestViewWithSlug: create, then fetch by the slug (email).
        slug_create_req = self.factory.post('/crud/',
                                    data=json.dumps({'name': 'Leonard', 'email': '*****@*****.**'}),
                                    content_type='application/json')
        CRUDTestViewWithSlug.as_view()(slug_create_req)

        slug_read_req = self.factory.get('/crud/?email={0}'.format('*****@*****.**'))
        slug_read_resp = CRUDTestViewWithSlug.as_view()(slug_read_req)
        self.assertEqual(json.loads(slug_read_resp.content.decode('utf-8'))['name'], 'Leonard')

        # Creating a second record with the same slug must fail.
        dup_req = self.factory.post('/crud/',
                                    data=json.dumps({'name': 'Leonard2', 'email': '*****@*****.**'}),
                                    content_type='application/json')
        dup_resp = CRUDTestViewWithSlug.as_view()(dup_req)
        self.assertGreaterEqual(dup_resp.status_code, 400)
        dup_data = json.loads(dup_resp.content.decode('utf-8'))
        self.assertTrue('detail' in dup_data and 'email' in dup_data['detail'] and len(dup_data['detail']['email']) > 0)
Exemple #23
0
    def updateEventRanges(self, event_ranges):
        """ Update an event range on the Event Server.

        Serializes ``event_ranges`` and posts it to the PanDA server.
        Returns ``(status, message)`` where status is the connection/server
        status code and message describes the outcome.
        """
        pUtil.tolog("Updating event ranges..")

        message = ""
        #url = "https://aipanda007.cern.ch:25443/server/panda"
        url = "https://pandaserver.cern.ch:25443/server/panda"
        # eventRanges = [{'eventRangeID': '4001396-1800223966-4426028-1-2', 'eventStatus':'running'}, {'eventRangeID': '4001396-1800223966-4426028-2-2','eventStatus':'running'}]

        node={}
        node['eventRanges']=json.dumps(event_ranges)

        # open connection
        ret = pUtil.httpConnect(node, url, path=self.__updateEventRangesDir, mode="UPDATEEVENTRANGES")
        # response = json.loads(ret[1])

        status = ret[0]
        if ret[0]: # non-zero return code
            # BUG FIX: the original format string had a single %d placeholder
            # for two arguments, which raised TypeError on the error path.
            message = "Failed to update event range - error code = %d, error: %s" % (ret[0], ret[1])
        else:
            # Round-trip through JSON to normalize the server reply structure.
            response = json.loads(json.dumps(ret[1]))
            status = int(response['StatusCode'])
            message = json.dumps(response['Returns'])

        return status, message
Exemple #24
0
    def handle_ticket(self):
        """Issue an encrypted service ticket to an authenticated requester.

        Validates that the request names a known client (or one of our own
        servers) and an existing service, caches a fresh session key by the
        requester's address, picks a random server for the service, and
        sends back the ticket bundle encrypted with the requester's key.
        Invalid requests receive an empty JSON list.

        BUG FIX: the body was tab-indented under a space-indented ``def``,
        which is a TabError under Python 3; indentation normalized.
        """
        if "service" in self.jdata and "name" in self.jdata:
            name = self.jdata["name"]
            service = self.jdata["service"]
            # Note the is_server part, if the request comes from one of its
            # own servers.
            if (name in names or is_server(name)) and service in services:
                session_key = generate_session_key()
                # Cache the user's ip address and session key.
                users[str(self.client_address[0])] = (session_key)
                ticket = [session_key]
                # Randomly choose a server for the requested service.
                server_id = random.choice(services[service])
                server_key = server_id[2]
                server_id = (server_id[0], server_id[1])
                ticket = secure.encrypt_with_key(json.dumps(ticket), server_key)
                jsonresult = {
                    "ticket": ticket,
                    "session": session_key,
                    "server_id": server_id
                }
                if name in names:
                    # A known client: encrypt with its registered key.
                    data = secure.encrypt_with_key(json.dumps(jsonresult), names[name])
                else:
                    # Already verified as one of our servers; just extract
                    # its password from the config file.
                    pwd = extract_password_from_server(name)
                    data = secure.encrypt_with_key(json.dumps(jsonresult), pwd)
                self.request.send(data)
                return

        self.request.send("[]")
def add_plan(request, access_token):
    """Create a new plan for the authenticated Facebook user.

    Reads the plan fields from POST (with test defaults), creates
    per-friend visibility entries when ``visible_type`` is 3, and returns
    a JSON result envelope.

    BUG FIX: the exception handler previously mixed spaces into an
    otherwise tab-indented function, a TabError under Python 3; the whole
    body is normalized to spaces.
    """
    result = login_auth(access_token)
    if result['err']['code'] != 0:
        return HttpResponse(json.dumps(result))
    userid = result['data']['id']
    try:
        new_plan = Plan()
        user = FBUser.objects.get(fbid=userid)
        new_plan.holder = user
        new_plan.title = request.POST.get('title', "testtitle")
        new_plan.destination = request.POST.get('destination', "testdestination")
        new_plan.description = request.POST.get('description', "testdescription")
        new_plan.depart_time = request.POST.get('depart_time', datetime.today())
        new_plan.length = request.POST.get('length', 2)
        new_plan.limit = request.POST.get('limit', 2)
        visible_type = request.POST.get('visible_type', 1)
        new_plan.visible_type = int(visible_type)
        friend_list = request.POST.getlist('friendlist', [])
        new_plan.full_clean()
        new_plan.save()
        if new_plan.visible_type == 3:
            # Type 3 restricts visibility to the listed friends.
            for friendid in friend_list:
                friend = FBUser.objects.get(fbid=friendid)
                private = PrivatePlan()
                private.accessible_user = friend
                private.accessible_plan = new_plan
                private.full_clean()
                private.save()
        result = format(0, 'create success')
        return HttpResponse(json.dumps(result))
    except Exception as e:
        result = format(400, str(e))
        return HttpResponse(json.dumps(result))
Exemple #26
0
def deferred_scrape_channel(scrape_function, callback, channel_id, slack_token, channel_name=None, response_url=None):
    """Scrape album ids from a Slack channel's history and store new ones.

    Progress and the final outcome are posted back to ``response_url``
    (when provided). Slack API failures are reported and re-raised.
    """
    def notify(text):
        # Best-effort progress report back to the Slack response URL.
        if response_url:
            requests.post(response_url, data=json.dumps({'text': text}))

    try:
        slack = slacker.Slacker(slack_token)
        notify(f'Getting channel history for {channel_name or channel_id}...')
        response = slack.channels.history(channel_id)
    except (KeyError, slacker.Error) as e:
        notify('There was an error accessing the Slack API')
        raise e
    if not response.successful:
        notify(f'failed to get channel history for {channel_name or channel_id}')
        return
    messages = response.body.get('messages', [])
    notify(f'Scraping {channel_name or channel_id}...')
    album_ids = scrape_function(messages)
    new_album_ids = list_model.check_for_new_list_ids(album_ids)
    try:
        if new_album_ids:
            callback(new_album_ids)
            print(f'[scraper]: {len(new_album_ids)} new albums found and added to the list')
            deferred_process_all_album_details.delay(None)
    except DatabaseError as e:
        print(f'[db]: failed to perform {callback.__name__}')
        print(f'[db]: {e}')
        notify('failed to update list')
    else:
        notify(f'Finished checking for new albums: {len(new_album_ids)} found in {channel_name or channel_id}')
Exemple #27
0
def deferred_consume_artist_albums(artist_url, response_url=None):
    """Scrape an artist's Bandcamp page and add any unseen album ids to the list.

    Status messages go to *response_url* (a Slack webhook) when provided.
    Database and scraper failures are logged to stdout rather than raised.
    """
    def report(text):
        # Post a status message back to Slack (callers guard on response_url).
        requests.post(response_url, data=json.dumps({'text': text}))

    try:
        existing_albums = list_model.get_list()
        scraped_ids = bandcamp.scrape_bandcamp_album_ids_from_artist_page(artist_url)
        new_album_ids = [album_id for album_id in scraped_ids if album_id not in existing_albums]
        if response_url and new_album_ids:
            report(f':full_moon: found {len(new_album_ids)} new albums to process...')
        elif response_url:
            report(f':new_moon: found no new albums to process')
    except DatabaseError as e:
        print('[db]: failed to check existing items')
        print(f'[db]: {e}')
    except NotFoundError:
        print(f'[scraper]: no albums found for artist at {artist_url}')
        if response_url:
            report(':red_circle: failed to find any albums')
    else:
        for new_album_id in new_album_ids:
            try:
                list_model.add_to_list(new_album_id)
                deferred_process_album_details.delay(str(new_album_id))
            except DatabaseError as e:
                print(f'[db]: failed to update list with {new_album_id} from {artist_url}')
                print(f'[db]: {e}')
        if response_url and new_album_ids:
            report(f':full_moon_with_face: done processing artist albums')
    def saveList(self):
        """Persist the current form as ``./foodlists/<name>.json`` and register the name.

        Also records the list name in ``./data/_lisnamedata`` (case-insensitively
        de-duplicated), refreshes the list-name widget, and shows a confirmation.
        """
        nameOfList = self.LNEDTlistName.text()
        restsOfEachDay = self.collect_restaurants()
        foods = self.collect_foods()
        pyJson = {
            "version": self.form_data_version,
            "restsOfEachDay": restsOfEachDay,
            "foods": foods,
        }

        # Write the food list; `with` guarantees the handle is closed (the
        # original left three file handles unclosed).
        with open('./foodlists/' + nameOfList + '.json', 'w') as json_file:
            json_file.write(json.dumps(pyJson))

        # Register the list name, de-duplicating case-insensitively.
        with open('./data/_lisnamedata', 'r+') as datas_file:
            datas_decoded = json.load(datas_file)
            if nameOfList.upper() not in (name.upper() for name in datas_decoded["listNames"]):
                datas_decoded["listNames"].append(nameOfList)
            datas_file.seek(0, 0)  # rewind and rewrite the whole file
            datas_file.write(json.dumps(datas_decoded))
            # Drop leftover bytes in case the previous content was longer
            # (the original omitted this, risking corrupt JSON).
            datas_file.truncate()

        self.fill_list_of_listname()
        self.showInfoMessage(u" لیست جدید", nameOfList + u" ذخیره شد ")
Exemple #29
0
    def pretty_log(self, args, kwargs, resp, body):
        """Log an HTTP request/response pair at DEBUG level as a replayable curl command.

        *args* holds the positional request parts (method, URL); *kwargs* holds at
        least ``headers`` and optionally ``body``.  *resp* is the response object
        and *body* its payload.  JSON bodies are pretty-printed when possible.
        """
        if not _logger.isEnabledFor(logging.DEBUG):
            return

        string_parts = ['curl -i']
        for element in args:
            if element in ('GET', 'POST'):
                string_parts.append(' -X %s' % element)
            else:
                string_parts.append(' %s' % element)

        for element in kwargs['headers']:
            header = ' -H "%s: %s"' % (element, kwargs['headers'][element])
            string_parts.append(header)

        curl_cmd = "".join(string_parts)
        _logger.debug("REQUEST:")
        if 'body' in kwargs:
            _logger.debug("%s -d '%s'" % (curl_cmd, kwargs['body']))
            # Narrowed from a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit; json failures raise ValueError
            # (invalid JSON) or TypeError (non-string body).
            try:
                req_body = json.dumps(json.loads(kwargs['body']),
                                      sort_keys=True, indent=4)
            except (ValueError, TypeError):
                req_body = kwargs['body']
            _logger.debug("BODY: %s\n" % (req_body))
        else:
            _logger.debug(curl_cmd)

        try:
            resp_body = json.dumps(json.loads(body), sort_keys=True, indent=4)
        except (ValueError, TypeError):
            # Not JSON -- log the raw payload instead.
            resp_body = body
        _logger.debug("RESPONSE HEADERS: %s" % resp)
        _logger.debug("RESPONSE BODY   : %s" % resp_body)
Exemple #30
0
 def save_logs(self, actions_log_path=None, http_request_log_path=None):
     """Write the collected HTTP-request log and/or action log to JSON files.

     Either path may be None/empty, in which case that log is skipped.
     """
     if http_request_log_path:
         # `with` ensures the handle is closed (the original leaked it).
         with open(http_request_log_path, 'w') as f:
             f.write(json.dumps(self.http_request_log))
     if actions_log_path:
         with open(actions_log_path, 'w') as f:
             f.write(json.dumps({'actions': self.actions}))
Exemple #31
0
    def do_GET(self):
        """Proxy the incoming HTTP request through a GAE relay server (Python 2 code).

        The request is serialized into a payload dict, compressed (and encrypted
        for https targets), POSTed to ``gaeServer`` with up to four retries, and
        the relayed response is replayed back to the browser.
        """
        # headers is a dict-like object, it doesn't have `iteritems` method, so convert it to `dict`
        req_headers = dict(self.headers)  # dict
        req_headers = dict((h, v) for h, v in req_headers.iteritems() if h.lower() not in self.forbidden_headers)

        req_body_len = int(req_headers.get('content-length', 0))
        req_body = self.rfile.read(req_body_len) # bin or str

        payload = {
            'command': self.command, # str
            'path': self.path, # str
            'headers': json.dumps(req_headers), # json
            'payload': lib.btoa(req_body), # str
        }

        # Serialize and compress the payload.
        payload = lib.dumpDict(payload)

        # Decide whether the payload needs encryption; a '0' prefix marks plaintext.
        if self.path.startswith('https'):
            payload = lib.encrypt(payload)
        else:
            payload = '0' + payload

        # Fetch from GAE, retrying up to 4 times with escalating deadlines.
        for i in range(4):
            try:
                res = urllib2.urlopen(gaeServer, payload, lib.deadlineRetry[i])
            except (urllib2.URLError, socket.timeout) as e: 
                logging.error(e)
                continue

            if res.code == 200:  # GAE answered without error
                result = res.read()
                result = lib.decrypt(result)
                result = lib.loadDict( result )

                res_status_code = result.status_code
                res_headers = json.loads(result.headers)
                res_content = lib.atob(result.content)
                break
        else:
            # Every attempt to reach GAE failed: install a fresh opener for next time.
            # NOTE(review): on this path res_status_code/res_headers/res_content are
            # never assigned, so the block below raises NameError -- confirm intended.
            urllib2.install_opener( get_g_opener() ) 

        # Replay the relayed response back to the browser.
        try:
            self.send_response(res_status_code) # 200 or 301 or 404

            # Forcing 'connection: close' does not hurt throughput and makes many
            # requests behave more predictably.
            res_headers['connection'] = 'close'
            for k, v in res_headers.iteritems():
                try:
                    self.send_header(k, v)
                except UnicodeEncodeError: # e.g. Google Plus sends header values with non-ASCII text
                    pass
            self.end_headers()
            self.wfile.write(res_content)
        except socket.error, e:
            # The browser closed the connection before the data arrived.
            logging.error(e)
Exemple #32
0
from kafka import KafkaConsumer, KafkaProducer
from pymongo import MongoClient
from json import loads, dumps
from time import sleep

# Produce 1000 numbered JSON messages onto the 'mostafa' topic, one every 5s.
producer = KafkaProducer(bootstrap_servers=['kafka:9092'],
                         value_serializer=lambda payload: dumps(payload).encode('utf-8'))

for number in range(1000):
    producer.send('mostafa', value={'number': number})
    sleep(5)

# Consume the same topic from the beginning and mirror each message into MongoDB.
consumer = KafkaConsumer('mostafa',
                         bootstrap_servers=['kafka:9092'],
                         auto_offset_reset='earliest',
                         enable_auto_commit=True,
                         group_id='my-group',
                         value_deserializer=lambda raw: loads(raw.decode('utf-8')))

client = MongoClient('mongo:27017')
collection = client.mostafa.mostafa

for record in consumer:
    document = record.value
    collection.insert_one(document)
    print('{} added to {}'.format(document, collection))
Exemple #33
0
    def notify(self, check):
        """Send an alert about *check*'s current status over this channel.

        Dispatches on ``self.kind`` (email, webhook, slack, hipchat, pd) and
        records each attempt as a Notification row, storing the HTTP status
        code where one is available.  ``self.value`` holds the kind-specific
        target (address, URL, or service key).
        """
        n = Notification(owner=check, channel=self)
        n.check_status = check.status

        if self.kind == "email" and self.email_verified:
            ctx = {
                "check": check,
                "checks": self.user.check_set.order_by("created"),
                "now": timezone.now()
            }
            emails.alert(self.value, ctx)
            n.save()
        elif self.kind == "webhook" and check.status == "down":
            # Webhooks only fire on "down" transitions.
            try:
                r = requests.get(self.value, timeout=5)
                n.status = r.status_code
            except requests.exceptions.Timeout:
                # Well, we tried
                pass

            n.save()
        elif self.kind == "slack":
            tmpl = "integrations/slack_message.html"
            text = render_to_string(tmpl, {"check": check})
            payload = {
                "text": text,
                "username": "******",
                "icon_url": "https://healthchecks.io/static/img/[email protected]"
            }

            r = requests.post(self.value, json=payload, timeout=5)

            n.status = r.status_code
            n.save()
        elif self.kind == "hipchat":
            tmpl = "integrations/hipchat_message.html"
            text = render_to_string(tmpl, {"check": check})
            payload = {
                "message": text,
                "color": "green" if check.status == "up" else "red",
            }

            r = requests.post(self.value, json=payload, timeout=5)

            n.status = r.status_code
            n.save()

        elif self.kind == "pd":
            # PagerDuty: trigger an incident on "down", resolve it otherwise.
            if check.status == "down":
                event_type = "trigger"
                description = "%s is DOWN" % check.name_then_code()
            else:
                event_type = "resolve"
                description = "%s received a ping and is now UP" % \
                    check.name_then_code()

            payload = {
                "service_key": self.value,
                "incident_key": str(check.code),
                "event_type": event_type,
                "description": description,
                "client": "healthchecks.io",
                "client_url": settings.SITE_ROOT
            }

            url = "https://events.pagerduty.com/generic/2010-04-15/create_event.json"
            r = requests.post(url, data=json.dumps(payload), timeout=5)

            n.status = r.status_code
            n.save()
    def __init__(self, groups, board, inject_xml_file_name):
        """Build a JSON parameter description from parsed parameter *groups*.

        *board* selects which board-specific parameter variant wins when a
        parameter appears more than once.  The serialized JSON string ends up
        in ``self.output``.  (*inject_xml_file_name* is accepted for interface
        compatibility; it is not used here.)
        """
        all_json = dict()
        all_json['version'] = 1
        all_params = []
        all_json['parameters'] = all_params

        # Param field-code -> JSON schema key (plain string values).
        schema_map = {
            "short_desc": "shortDesc",
            "long_desc": "longDesc",
            "unit": "units",
        }
        # Field codes whose values take the parameter's own type.
        schema_map_typed = {
            "min": "min",
            "max": "max",
            "increment": "increment",
        }
        # Field codes converted to a fixed target type.
        schema_map_fix_type = {
            "reboot_required": ("rebootRequired", bool),
            "decimal": ("decimalPlaces", int),
        }
        allowed_types = {"Uint8", "Int8", "Uint16", "Int16", "Uint32", "Int32", "Float"}

        def get_typed_value(value: str, type_name: str):
            # Convert a raw string to the parameter's native type where known.
            # (Hoisted out of the group loop; it was redefined every iteration.)
            if type_name == 'Float': return float(value)
            if type_name == 'Int32': return int(value)
            return value

        last_param_name = ""
        board_specific_param_set = False
        for group in groups:
            group_name = group.GetName()

            for param in group.GetParams():
                # A repeated name is only re-emitted while no board-specific
                # variant has claimed it yet.
                if (last_param_name == param.GetName() and not board_specific_param_set) or last_param_name != param.GetName():
                    curr_param = dict()
                    curr_param['name'] = param.GetName()
                    type_name = param.GetType().capitalize()
                    curr_param['type'] = type_name
                    if not type_name in allowed_types:
                        # BUG FIX: the original format string had one placeholder but
                        # two arguments (TypeError), and sys.Exit does not exist.
                        print("Error: %s type not supported: %s" % (curr_param['name'], curr_param['type']))
                        sys.exit(1)
                    curr_param['default'] = get_typed_value(param.GetDefault(), type_name)

                    curr_param['group'] = group_name
                    if param.GetCategory():
                        curr_param['category'] = param.GetCategory()
                    else:
                        curr_param['category'] = 'Standard'

                    if param.GetVolatile():
                        curr_param['volatile'] = True

                    last_param_name = param.GetName()
                    for code in param.GetFieldCodes():
                        value = param.GetFieldValue(code)
                        if code == "board":
                            if value == board:
                                board_specific_param_set = True
                                # JSON schema has no field for board_specific schema. Ignore.
                            else:
                                continue
                        else:
                            # Map param field names to schema names.
                            if code in schema_map:
                                curr_param[schema_map[code]] = value
                            elif code in schema_map_typed:
                                curr_param[schema_map_typed[code]] = get_typed_value(value, type_name)
                            elif code in schema_map_fix_type:
                                curr_param[schema_map_fix_type[code][0]] = schema_map_fix_type[code][1](value)
                            else:
                                print('ERROR: Field not in json schema: %s' % code)
                                sys.exit(1)

                if last_param_name != param.GetName():
                    board_specific_param_set = False

                enum_codes = param.GetEnumCodes() or ''  # numerical values for the parameter
                if enum_codes:
                    enum_codes = sorted(enum_codes, key=float)
                    codes_list = list()
                    for item in enum_codes:
                        code_dict = dict()
                        code_dict['value'] = get_typed_value(item, type_name)
                        code_dict['description'] = param.GetEnumValue(item)
                        codes_list.append(code_dict)
                    curr_param['values'] = codes_list

                if len(param.GetBitmaskList()) > 0:
                    bitmasks_list = list()
                    for index in param.GetBitmaskList():
                        bitmask_dict = dict()
                        bitmask_dict['index'] = int(index)
                        bitmask_dict['description'] = param.GetBitmaskBit(index)
                        bitmasks_list.append(bitmask_dict)
                    curr_param['bitmask'] = bitmasks_list

                all_params.append(curr_param)

        # Serialized JSON string output.
        self.output = json.dumps(all_json, indent=2)
        def Send(self, topics, msg):
            """Serialize a parsed telegram *msg* to JSON and publish it over MQTT.

            *msg* is a list of byte strings; byte 6 ("RORG") selects one of two
            telegram layouts: "f6" (single data byte) or "a5" (four data bytes,
            DB3..DB0).  *topics* is unused; the message is published on the
            instance's configured topic.

            Raises ValueError for an unsupported RORG byte.  (The original code
            left ``jsonMsg`` unbound in that case and crashed with NameError.)
            """
            if msg[6] == "f6":
                jsonMsg = {
                    'packet': {
                        'header': {
                            'syncByte': msg[0],
                            'dataLength': msg[1] + msg[2],
                            'optionalDataLength': msg[3],
                            'packetType': msg[4],
                            'CRC8H': msg[5]
                        },
                        'data': {
                            'RORG': msg[6],
                            'data': msg[7],
                            'senderID': msg[8] + msg[9] + msg[10] + msg[11],
                            'status': msg[12]
                        },
                        'optionalData': {
                            'subTelNum': msg[13],
                            'destinationID':
                            msg[14] + msg[15] + msg[16] + msg[17],
                            'dBm': msg[18],
                            'securityLevel': msg[19],
                            'CRC8D': msg[20]
                        }
                    }
                }
            elif msg[6] == "a5":
                jsonMsg = {
                    'packet': {
                        'header': {
                            'syncByte': msg[0],
                            'dataLength': msg[1] + msg[2],
                            'optionalDataLength': msg[3],
                            'packetType': msg[4],
                            'CRC8H': msg[5]
                        },
                        'data': {
                            'RORG': msg[6],
                            'DB3': msg[7],
                            'DB2': msg[8],
                            'DB1': msg[9],
                            'DB0': msg[10],
                            'senderID': msg[11] + msg[12] + msg[13] + msg[14],
                            'status': msg[15]
                        },
                        'optionalData': {
                            'subTelNum': msg[16],
                            'destinationID':
                            msg[17] + msg[18] + msg[19] + msg[20],
                            'dBm': msg[21],
                            'securityLevel': msg[22],
                            'CRC8D': msg[23]
                        }
                    }
                }
            else:
                raise ValueError('unsupported RORG byte: %s' % msg[6])

            jsonMsg = json.dumps(jsonMsg)
            self.__mqtt.sendMessage(self.__topic, jsonMsg)
            })
        resource = doc.entity(
            'bdp:xgbq-327x', {
                'prov:label': 'RealTimeRedLine',
                prov.model.PROV_TYPE: 'ont:DataResource',
                'ont:Extension': 'json'
            })
        get_MBTAinfo = doc.activity(
            'log:uuid' + str(uuid.uuid4()), startTime, endTime, {
                prov.model.PROV_LABEL: 'RealTimeRedLine',
                prov.model.PROV_TYPE: 'ont:DataSet'
            })
        doc.wasAssociatedWith(get_MBTAinfo, this_script)
        doc.usage(get_MBTAinfo, resource, startTime)
        MBTAReport = doc.entity(
            'dat:lc546_jofranco#RealTimeRedLine', {
                prov.model.PROV_LABEL: 'Real Time',
                prov.model.PROV_TYPE: 'ont:DataSet'
            })
        doc.wasAttributedTo(MBTAReport, this_script)
        doc.wasGeneratedBy(MBTAReport, get_MBTAinfo, endTime)
        doc.wasDerivedFrom(MBTAReport, resource, get_MBTAinfo, get_MBTAinfo,
                           get_MBTAinfo)
        return doc


# Run the MBTA red-line retrieval, then print its provenance document both in
# PROV-N form and as pretty-printed JSON.
MBTAredlinedata.execute()
doc = MBTAredlinedata.provenance()
print(doc.get_provn())
print(json.dumps(json.loads(doc.serialize()), indent=4))
def save_data(data, name):
    """Serialize *data* with CarsDataEncoder and write it to ``<name>.json``."""
    with open(f"{name}.json", "w") as outfile:
        outfile.write(json.dumps(data, cls=CarsDataEncoder, indent=2))
Exemple #38
0
def parse(decoder, input, json_, output_files, cleanup, tempdir, enable_ida_log, timeout):
    """
    Parses given input with given parser.

    \b
    DECODER: Name of decoder to run.
    INPUT: One or more input file paths. (Wildcards are allowed).

    \b
    Common usages::
        kordesii parse foo ./malware.bin                         - Run foo decoder on ./malware.bin
        kordesii parse foo ./repo/*                              - Run foo decoder on files found in repo directory.
        kordesii parse --json foo ./malware.bin                  - Run foo decoder and display results as json.
    """
    # Python won't process wildcards when used through Windows command prompt.
    if any('*' in path for path in input):
        new_input = []
        for path in input:
            if '*' in path:
                new_input.extend(glob.glob(path))
            else:
                new_input.append(path)
        input = new_input

    input_files = list(filter(os.path.isfile, input))
    if not input_files:
        sys.exit('Unable to find any input files.')

    # Run Kordesii on each file, collecting per-file metadata (plus any errors
    # and the IDA log) into `results`.
    try:
        reporter = kordesii.Reporter(
            tempdir=tempdir,
            disabletempcleanup=not cleanup,
        )
        results = []
        for path in input_files:
            logger.info('Parsing: {}'.format(path))
            input_file = os.path.abspath(path)
            reporter.run_decoder(
                decoder, input_file,
                timeout=timeout,
                log=enable_ida_log,
                cleanup_txt_files=cleanup,
                cleanup_idb_files=cleanup,
                cleanup_output_files=not output_files,
            )
            # TODO: Pull errors and ida logs from logger?
            result = reporter.metadata
            if reporter.errors:
                result['errors'] = reporter.errors
            if reporter.ida_log:
                result['ida_log'] = reporter.ida_log
            results.append(result)
            if not json_:
                reporter.print_report()

        if json_:
            print(json.dumps(results, indent=4))

    except Exception as e:
        error_message = "Error running DC3-Kordesii: {}".format(e)
        traceback.print_exc()
        # BUG FIX: this previously tested `format == 'json'`, comparing the
        # builtin `format` function to a string (always False), so errors were
        # never emitted as JSON even when --json was requested.
        if json_:
            print(json.dumps({'errors': [error_message]}))
        else:
            print(error_message)
        sys.exit(1)
Exemple #39
0
 def _send(self, msg):
     """JSON-encode *msg* and emit it on stdout as a single flushed line."""
     line = json.dumps(msg) + "\n"
     sys.stdout.write(line)
     sys.stdout.flush()
Exemple #40
0
# Smoke-test script: create a test order against the nanopay open API and print
# the resulting H5 checkout page URL.
import requests as R
import json
from randomdigit import *
url = "http://api-test.nanopay.in.fg-example.com/openApi/v1/ordercreate"
header = {'Content-Type': 'application/json'}
# Random identifiers for this test order.
thirdOrderId = rand_num(12)
merchantUserId = rand_num(18)
# NOTE(review): merchantUserId is generated above but the request below sends a
# hard-coded "32033708" instead -- confirm which is intended.
data = {
    "merchantId": 2020000808,
    "thirdOrderId": thirdOrderId,
    "orderAmount": "200",
    "transType": 7,
    "expireTime": 888888,
    "merchantUserId": "32033708",
    "installmentId": 1,
    "goodsName": [{
        "goodsName": "big apples"
    }],
    "appId": 2019010925,
    "timestamp": 1582547787122,
    "version": "1.0",
    "sign": "YS3zxZvSoG52%)($"
}
res = R.post(url=url, headers=header, data=json.dumps(data)).json()

# Build the H5 checkout URL from the returned serial and temporary code.
ts = res['data']['transSerial']
tc = res['data']['tmpCode']

url1 = "https://h5.nanopay.in.fg-example.com/h5sdk/home/index.html?tmpCode=%s&transSerial=%s" % (
    tc, ts)
print(url1)
Exemple #41
0
def plot_graphs():
    """Plot and save statistics gathered from the global TRSL decision tree.

    Python 2 code (xrange, list-returning map/zip, integer '/').  Reads the
    module-level globals populated elsewhere in this script and writes several
    .png figures plus a ``<filename>.set_index`` JSON dump.
    """

    global trsl_instance, tree_data, length_fragment_row_indices_list
    global depth_list, sets_count, sets, xi

    # Walk the tree breadth-first to populate the statistics globals.
    bfs([trsl_instance.root])
    # NOTE(review): this xlabel text looks like a pasted filename -- confirm.
    plt.xlabel("Level Avg Entropy vs Level.png")
    plt.ylabel("Avg Entropy")
    plt.plot(
        xrange(0, len(tree_data)),
        map(lambda x: x['avg_entropy'], tree_data),
        label="Probabilistic Avg Entropy"
    )
    plt.legend()
    plt.savefig(
        "Avg Entropy vs Level.png"
    )
    plt.figure()
    plt.xlabel("Level")
    plt.ylabel("log2(No of Nodes)")
    plt.plot(
        xrange(0, len(tree_data)),
        map(lambda x: math.log(x['no_of_nodes'], 2), tree_data)
    )
    plt.savefig(
        "No of Nodes vs Level.png"
    )

    plt.figure()
    plt.xlabel("Depth")
    plt.ylabel("No of Fragment Indices")
    plt.plot(depth_list, length_fragment_row_indices_list)
    # NOTE(review): missing a .png extension, unlike the other figures -- confirm.
    plt.savefig(
        "Depth vs No of Fragment Indices"
    )

    plt.figure()
    plt.xlabel("Set index")
    plt.ylabel("No of Questions")
    plt.bar(xrange(0, len(sets_count)), sets_count)
    plt.savefig(
        "Set index vs no of questions.png"
    )
    # Persist (set, question-count) pairs next to the training file.
    open(
        trsl_instance.filename + ".set_index", "w"
    ).write(json.dumps(zip(map(list, sets), sets_count)))

    plt.figure()
    # Tokenize words (with optional apostrophe suffix) and sentence-final dots.
    tokenizer = RegexpTokenizer(r'(\w+(\'\w+)?)|\.')
    common_words = Counter(
        tokenizer.tokenize(
            open(
                trsl_instance.filename,
                "r"
            ).read().lower()
        )
    )
    sets_avg_freq = []

    # Median corpus frequency of each set's words (integer '/' under Python 2).
    for s in sets:
        temp_list = []
        for word in s:
            temp_list.append(common_words[word])
        temp_list.sort()
        sets_avg_freq.append(temp_list[len(temp_list) / 2])
    plt.xlabel("Median frequency of words in set")
    plt.ylabel("No of Questions")
    plt.bar(sets_avg_freq, sets_count[:len(sets)])
    plt.savefig(
        "Median Freq of set vs no of questions.png"
    )

    plt.figure()
    plt.xlabel("Predictor Variable index")
    plt.ylabel("No of Questions")
    plt.bar(range(0, len(xi)), xi)
    plt.savefig(
        "Xi vs no of questions.png"
    )
def pretty_print(filename):
    """Print the JSON content of *filename*, indented and key-sorted."""
    parsed = read_json(filename)
    print(json.dumps(parsed, indent=4, sort_keys=True))
Exemple #43
0
def gconnect():
    """Google OAuth2 sign-in endpoint (Python 2 code).

    Validates the anti-forgery state token, exchanges the posted one-time code
    for credentials, verifies the access token against the intended user and
    this app, stores the user in the session (creating a local record if
    needed), and returns a welcome HTML snippet.
    """
    # Validate state token
    if request.args.get('state') != login_session['state']:
        response = make_response(json.dumps('Invalid state parameter.'), 401)
        response.headers['Content-Type'] = 'application/json'
        return response
    # Obtain authorization code
    code = request.data

    try:
        # Upgrade the authorization code into a credentials object
        oauth_flow = flow_from_clientsecrets('client_secrets.json', scope='')
        oauth_flow.redirect_uri = 'postmessage'
        credentials = oauth_flow.step2_exchange(code)
    except FlowExchangeError:
        response = make_response(
            json.dumps('Failed to upgrade the authorization code.'), 401)
        response.headers['Content-Type'] = 'application/json'
        return response

    # Check that the access token is valid.
    access_token = credentials.access_token
    url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?access_token=%s'
           % access_token)
    h = httplib2.Http()
    result = json.loads(h.request(url, 'GET')[1].decode("utf8"))
    # If there was an error in the access token info, abort.
    if result.get('error') is not None:
        response = make_response(json.dumps(result.get('error')), 500)
        response.headers['Content-Type'] = 'application/json'
        return response

    # Verify that the access token is used for the intended user.
    gplus_id = credentials.id_token['sub']
    if result['user_id'] != gplus_id:
        response = make_response(
            json.dumps("Token's user ID doesn't match given user ID."), 401)
        response.headers['Content-Type'] = 'application/json'
        return response

    # Verify that the access token is valid for this app.
    if result['issued_to'] != CLIENT_ID:
        response = make_response(
            json.dumps("Token's client ID does not match app's."), 401)
        # Python 2 print statement.
        print "Token's client ID does not match app's."
        response.headers['Content-Type'] = 'application/json'
        return response

    # Short-circuit if this user is already connected.
    stored_credentials = login_session.get('credentials')
    stored_gplus_id = login_session.get('gplus_id')
    if stored_credentials is not None and gplus_id == stored_gplus_id:
        response = make_response(json.dumps('Current user is already connected.'),
                                 200)
        response.headers['Content-Type'] = 'application/json'
        return response

    # Store the access token in the session for later use.
    login_session['access_token'] = credentials.access_token
    login_session['gplus_id'] = gplus_id

    # Get user info
    userinfo_url = "https://www.googleapis.com/oauth2/v1/userinfo"
    params = {'access_token': credentials.access_token, 'alt': 'json'}
    answer = requests.get(userinfo_url, params=params)

    data = answer.json()

    login_session['username'] = data['name']
    login_session['picture'] = data['picture']
    login_session['email'] = data['email']
    # ADD PROVIDER TO LOGIN SESSION
    login_session['provider'] = 'google'

    # see if user exists, if it doesn't make a new one
    user_id = get_user_id(data["email"])
    if not user_id:
        user_id = create_user(login_session)
    login_session['user_id'] = user_id

    # Build the welcome snippet returned to the sign-in popup.
    output = ''
    output += '<h3>Welcome, '
    output += login_session['username']
    output += '!</h3>'
    output += '<img src="'
    output += login_session['picture']
    output += ' " style = "width: 300px; height: 300px;border-radius: 150px;-webkit-border-radius: 150px;-moz-border-radius: 150px;"> '
    flash("You are now logged in as %s" % login_session['username'], "alert-success")
    return output
Exemple #44
0
def fbconnect():
    """Facebook OAuth sign-in endpoint (Python 2 code).

    Validates the anti-forgery state token, exchanges the short-lived token
    for a long-lived one, fetches the user's profile and picture from the
    Graph API, stores everything in the session (creating a local user record
    if needed), and returns a welcome HTML snippet.
    """
    if request.args.get('state') != login_session['state']:
        response = make_response(json.dumps('Invalid state parameter.'), 401)
        response.headers['Content-Type'] = 'application/json'
        return response
    access_token = request.data
    # Python 2 print statement.
    print "access token received %s " % access_token

    # Exchange the client token for a long-lived server-side token.
    app_id = json.loads(open('fb_client_secrets.json', 'r').read())[
        'web']['app_id']
    app_secret = json.loads(
        open('fb_client_secrets.json', 'r').read())['web']['app_secret']
    url = 'https://graph.facebook.com/oauth/access_token?grant_type=fb_exchange_token&client_id=%s&client_secret=%s&fb_exchange_token=%s' % (
        app_id, app_secret, access_token)
    h = httplib2.Http()
    result = h.request(url, 'GET')[1]

    # Use token to get user info from API
    userinfo_url = "https://graph.facebook.com/v2.4/me"
    # strip expire tag from access token
    token = result.split("&")[0]

    url = 'https://graph.facebook.com/v2.4/me?%s&fields=name,id,email' % token
    h = httplib2.Http()
    result = h.request(url, 'GET')[1]
    # print "url sent for API access:%s"% url
    # print "API JSON result: %s" % result
    data = json.loads(result)
    login_session['provider'] = 'facebook'
    login_session['username'] = data["name"]
    login_session['email'] = data["email"]
    login_session['facebook_id'] = data["id"]

    # The token must be stored in the login_session in order to properly
    # logout, let's strip out the information before the equals sign in our
    # token
    stored_token = token.split("=")[1]
    login_session['access_token'] = stored_token

    # Get user picture
    url = 'https://graph.facebook.com/v2.4/me/picture?%s&redirect=0&height=200&width=200' % token
    h = httplib2.Http()
    result = h.request(url, 'GET')[1]
    data = json.loads(result)

    login_session['picture'] = data["data"]["url"]

    # see if user exists
    user_id = get_user_id(login_session['email'])
    if not user_id:
        user_id = create_user(login_session)
    login_session['user_id'] = user_id

    # Build the welcome snippet returned to the sign-in popup.
    output = ''
    output += '<h3>Welcome, '
    output += login_session['username']

    output += '!</h3>'
    output += '<img src="'
    output += login_session['picture']
    output += ' " style = "width: 300px; height: 300px;border-radius: 150px;-webkit-border-radius: 150px;-moz-border-radius: 150px;"> '

    flash("Now logged in as %s" % login_session['username'], "alert-success")
    return output
Exemple #45
0
 def process_bind_param(self, value, dialect):
     """Serialize *value* to a JSON string for storage; None passes through."""
     if value is None:
         return None
     return json.dumps(value)
async def report_to_slack(output, webhook):
    """POST the time-to-first-byte results *output* to the Slack *webhook* URL."""
    body = json.dumps({
        "text": f"lbrynet {__version__} ({system_info.get_platform()['platform']}) time to first byte:\n{output}"
    })
    # Response body is irrelevant; the context manager just ensures cleanup.
    async with aiohttp.request('post', webhook, data=body):
        pass
Exemple #47
0
    def test_process_allow_multi_around_normalization(self, mock_client):
        """Rules Engine - Threat Intel is enabled run multi-round_normalization

        Verifies that a single record normalized by multiple rules (with
        different datatype keys) produces one normalized record, no direct
        alerts, and two alerts once passed through threat-intel matching.
        """

        # The first two rules fire only after IOC enrichment adds the
        # 'streamalert:ioc' key; the third matches on any IOC presence.
        @rule(datatypes=['fileHash'], outputs=['s3:sample_bucket'])
        def match_file_hash(rec): # pylint: disable=unused-variable
            """Testing dummy rule to match file hash"""
            return 'streamalert:ioc' in rec and 'md5' in rec['streamalert:ioc']

        @rule(datatypes=['fileHash'], outputs=['s3:sample_bucket'])
        def match_file_hash_again(_): # pylint: disable=unused-variable
            """Testing dummy rule to match file hash again"""
            return False

        @rule(datatypes=['fileHash', 'sourceDomain'], outputs=['s3:sample_bucket'])
        def match_source_domain(rec): # pylint: disable=unused-variable
            """Testing dummy rule to match source domain and file hash"""
            return 'streamalert:ioc' in rec

        # Stub out DynamoDB and enable threat intel on a copy of the config.
        mock_client.return_value = MockDynamoDBClient()
        toggled_config = self.config
        toggled_config['global']['threat_intel']['enabled'] = True
        toggled_config['global']['threat_intel']['dynamodb_table'] = 'test_table_name'

        new_rules_engine = RulesEngine(toggled_config)
        # Synthetic event carrying both a file hash and a source domain.
        kinesis_data = {
            "Field1": {
                "SubField1": {
                    "key1": 17,
                    "key2_md5": "md5-of-file",
                    "key3_source_domain": "evil.com"
                },
                "SubField2": 1
            },
            "Field2": {
                "Authentication": {}
            },
            "Field3": {},
            "Field4": {}
        }

        kinesis_data = json.dumps(kinesis_data)
        service, entity = 'kinesis', 'test_stream_threat_intel'
        raw_record = make_kinesis_raw_record(entity, kinesis_data)
        payload = load_and_classify_payload(toggled_config, service, entity, raw_record)
        alerts, normalized_records = new_rules_engine.run(payload)

        # Two testing rules are for threat intelligence matching. So no alert will be
        # generated before threat intel takes effect.
        assert_equal(len(alerts), 0)

        # One record will be normalized once by two different rules with different
        # normalization keys.
        assert_equal(len(normalized_records), 1)
        assert_equal(normalized_records[0].pre_parsed_record['streamalert:normalization'].keys(),
                     ['fileHash', 'sourceDomain'])

        # Pass normalized records to threat intel engine.
        alerts_from_threat_intel = new_rules_engine.threat_intel_match(normalized_records)
        assert_equal(len(alerts_from_threat_intel), 2)
        # NOTE(review): ordering of these alerts relies on rule-registration
        # order within this test — confirm it is deterministic.
        assert_equal(alerts_from_threat_intel[0].rule_name, 'match_file_hash')
        assert_equal(alerts_from_threat_intel[1].rule_name, 'match_source_domain')
Exemple #48
0
 def dump_cache(cls, filename):
     """Persist the transaction cache to *filename* as sorted, indented JSON.

     Each cached transaction is serialized to its hex representation and
     keyed by its cache key.
     """
     with open(filename, 'w') as out:
         serialized = {key: tx.serialize().hex() for key, tx in cls.cache.items()}
         out.write(json.dumps(serialized, sort_keys=True, indent=4))
Exemple #49
0
    def test_match_types(self):
        """Rules Engine - Match normalized types against record

        Runs two datatype-based rules over two synthetic CloudWatch events
        and expects exactly one alert per event.
        """
        @rule(logs=['cloudwatch:test_match_types'],
              outputs=['s3:sample_bucket'],
              datatypes=['sourceAddress'])
        def match_ipaddress(rec): # pylint: disable=unused-variable
            """Testing rule to detect matching IP address

            Datatype 'sourceAddress' is defined in tests/unit/conf/types.json
            for cloudwatch logs. This rule should be trigger by testing event.
            """
            results = fetch_values_by_datatype(rec, 'sourceAddress')

            return any(result == '1.1.1.2' for result in results)

        @rule(logs=['cloudwatch:test_match_types'],
              outputs=['s3:sample_bucket'],
              datatypes=['sourceAddress', 'command'])
        def mismatch_types(rec): # pylint: disable=unused-variable
            """Testing rule with non-existing normalized type in the record.

            Datatype 'sourceAddress' is defined in tests/unit/conf/types.json
            for cloudwatch logs, but 'command' is not. This rule should be
            triggered by testing event since we change rule parameter 'datatypes'
            to OR operation among CEF types. See the discussion at
            https://github.com/airbnb/streamalert/issues/365
            """
            results = fetch_values_by_datatype(rec, 'sourceAddress')

            return any(result == '2.2.2.2' for result in results)

        # Two synthetic events: the first should trigger match_ipaddress,
        # the second mismatch_types (per the rules above).
        kinesis_data_items = [
            {
                'account': 123456,
                'region': '123456123456',
                'source': '1.1.1.2',
                'detail': {
                    'eventName': 'ConsoleLogin',
                    'sourceIPAddress': '1.1.1.2',
                    'recipientAccountId': '654321'
                }
            },
            {
                'account': 654321,
                'region': '654321654321',
                'source': '2.2.2.2',
                'detail': {
                    'eventName': 'ConsoleLogin',
                    'sourceIPAddress': '2.2.2.2',
                    'recipientAccountId': '123456'
                }
            }
        ]

        # prepare payloads
        alerts = []
        for data in kinesis_data_items:
            kinesis_data = json.dumps(data)
            # prepare the payloads
            service, entity = 'kinesis', 'test_kinesis_stream'
            raw_record = make_kinesis_raw_record(entity, kinesis_data)
            payload = load_and_classify_payload(self.config, service, entity, raw_record)

            alerts.extend(self.rules_engine.run(payload)[0])

        # check alert output
        assert_equal(len(alerts), 2)

        # alert tests
        # NOTE(review): alert ordering follows the event-processing order above.
        assert_equal(alerts[0].rule_name, 'match_ipaddress')
Exemple #50
0
    def test_process_optional_logs(self):
        """Rules Engine - Logs is optional when datatypes are present

        Covers the three combinations of the @rule kwargs: datatypes only,
        logs only, and both. A single matching event should trigger all three.
        """
        @rule(datatypes=['sourceAddress'],
              outputs=['s3:sample_bucket'])
        def no_logs_has_datatypes(rec): # pylint: disable=unused-variable
            """Testing rule when logs is not present, datatypes is"""
            results = fetch_values_by_datatype(rec, 'sourceAddress')

            for result in results:
                if result == '1.1.1.2':
                    return True
            return False

        @rule(logs=['cloudwatch:test_match_types'],
              outputs=['s3:sample_bucket'])
        def has_logs_no_datatypes(rec): # pylint: disable=unused-variable
            """Testing rule when logs is present, datatypes is not"""

            return (
                rec['source'] == '1.1.1.2' or
                rec['detail']['sourceIPAddress'] == '1.1.1.2'
            )

        @rule(logs=['cloudwatch:test_match_types'],
              datatypes=['sourceAddress'],
              outputs=['s3:sample_bucket'])
        def has_logs_datatypes(rec): # pylint: disable=unused-variable
            """Testing rule when logs is present, datatypes is"""
            results = fetch_values_by_datatype(rec, 'sourceAddress')

            for result in results:
                if result == '1.1.1.2':
                    return True
            return False

        # One synthetic event that satisfies every rule above.
        kinesis_data_items = [
            {
                'account': 123456,
                'region': '123456123456',
                'source': '1.1.1.2',
                'detail': {
                    'eventName': 'ConsoleLogin',
                    'sourceIPAddress': '1.1.1.2',
                    'recipientAccountId': '654321'
                }
            }
        ]

        alerts = []
        for data in kinesis_data_items:
            kinesis_data = json.dumps(data)
            service, entity = 'kinesis', 'test_kinesis_stream'
            raw_record = make_kinesis_raw_record(entity, kinesis_data)
            payload = load_and_classify_payload(self.config, service, entity, raw_record)

            alerts.extend(self.rules_engine.run(payload)[0])

        # All three rules should have fired exactly once (order-insensitive check).
        assert_equal(len(alerts), 3)
        rule_names = ['no_logs_has_datatypes',
                      'has_logs_no_datatypes',
                      'has_logs_datatypes']
        assert_items_equal([alerts[i].rule_name for i in range(3)], rule_names)
Exemple #51
0
        
    },
    "scene":"defaultScene",
        "scenes":{
            "defaultScene":{
                "nodes":[
                    "node_3"
                ]
        }
    },
    "extensionsUsed":[
        "KHR_binary_glTF"
    ]
}

# Serialize the glTF scene tree (jsonobj, built above) for the binary container.
jsonstr = json.dumps(jsonobj)

print jsonstr

# Write a glTF 1.0 binary (.glb) container: 'glTF' magic, version, lengths,
# then (presumably, outside this snippet) the JSON content and body chunk.
with open('c:/tests/x3d2/x3d2/src/assets/test_e.glb', 'wb+') as f:
    # head
    f.write('glTF')  # 4-byte magic
    f.write(struct.pack('i',1))  # container version 1

    # NOTE(review): sys.getsizeof measures the Python object overhead, not
    # len(jsonstr); the glb contentLength field normally wants the byte
    # length of the JSON content — confirm against the spec.
    jsonlength=sys.getsizeof(jsonstr) 
    # NOTE(review): 20 is the glb header size; 78 presumably the binary body
    # size appended elsewhere — confirm.
    length =20+jsonlength+78
    f.write(struct.pack('i',length)) #filelength
    f.write(struct.pack('i',jsonlength)) #contentlength
    f.write(struct.pack('i',0)) #contentformat (0 = JSON)

Exemple #52
0
    def test_process_subkeys_nested_records(self):
        """Rules Engine - Required Subkeys with Nested Records

        Checks that process_subkeys accepts a record whose required nested
        subkey (requestParameters.program) is present and rejects one where
        requestParameters is null.
        """
        def cloudtrail_us_east_logs(rec):
            return (
                'us-east' in rec['awsRegion'] and
                'AWS' in rec['requestParameters']['program']
            )
        # Rule requiring the 'program' subkey under 'requestParameters'.
        rule_attrs = Rule(
            cloudtrail_us_east_logs,
            rule_name='cloudtrail_us_east_logs',
            matchers=[],
            datatypes=[],
            logs=['test_log_type_json_nested'],
            merge_by_keys=[],
            merge_window_mins=0,
            outputs=['s3:sample_bucket'],
            req_subkeys={'requestParameters': ['program']},
            context={}
        )

        # Three CloudTrail-style records: two with requestParameters.program
        # present, one with requestParameters explicitly null.
        data = json.dumps({
            'Records': [
                {
                    'eventVersion': '1.05',
                    'eventID': '2',
                    'eventTime': '3',
                    'requestParameters': {
                        'program': 'AWS CLI'
                    },
                    'eventType': 'CreateSomeResource',
                    'responseElements': 'Response',
                    'awsRegion': 'us-east-1',
                    'eventName': 'CreateResource',
                    'userIdentity': {
                        'name': 'john',
                        'key': 'AVC124313414'
                    },
                    'eventSource': 'Kinesis',
                    'requestID': '12345',
                    'userAgent': 'AWS CLI v1.3109',
                    'sourceIPAddress': '127.0.0.1',
                    'recipientAccountId': '123456123456'
                },
                {
                    'eventVersion': '1.05',
                    'eventID': '2',
                    'eventTime': '3',
                    'requestParameters': {
                        'program': 'AWS UI'
                    },
                    'eventType': 'CreateSomeOtherResource',
                    'responseElements': 'Response',
                    'awsRegion': 'us-east-2',
                    'eventName': 'CreateResource',
                    'userIdentity': {
                        'name': 'ann',
                        'key': 'AD114313414'
                    },
                    'eventSource': 'Lambda',
                    'requestID': '12345',
                    'userAgent': 'Google Chrome 42',
                    'sourceIPAddress': '127.0.0.2',
                    'recipientAccountId': '123456123456'
                },
                {
                    'eventVersion': '1.05',
                    'eventID': '2',
                    'eventTime': '3',
                    # Translates from null in JSON to None in Python
                    'requestParameters': None,
                    'eventType': 'CreateSomeResource',
                    'responseElements': 'Response',
                    'awsRegion': 'us-east-1',
                    'eventName': 'CreateResource',
                    'userIdentity': {
                        'name': 'john',
                        'key': 'AVC124313414'
                    },
                    'eventSource': 'Kinesis',
                    'requestID': '12345',
                    'userAgent': 'AWS CLI',
                    'sourceIPAddress': '127.0.0.1',
                    'recipientAccountId': '123456123456'
                }
            ]
        })

        schema = self.config['logs']['test_cloudtrail']['schema']
        options = self.config['logs']['test_cloudtrail']['configuration']

        parser_class = get_parser('json')
        parser = parser_class(options)
        parsed_result = parser.parse(schema, data)

        # A record with requestParameters present passes the subkey check.
        valid_record = [
            rec for rec in parsed_result if rec['requestParameters'] is not None][0]
        valid_subkey_check = RulesEngine.process_subkeys(valid_record, 'json', rule_attrs)
        assert_true(valid_subkey_check)

        # A record with requestParameters == None fails the subkey check.
        invalid_record = [
            rec for rec in parsed_result if rec['requestParameters'] is None][0]
        invalid_subkey_check = RulesEngine.process_subkeys(invalid_record, 'json', rule_attrs)
        assert_false(invalid_subkey_check)
def get_id_token_json(request):
    """Return the session-stored ``id_token`` serialized as a JSON string.

    Raises ``KeyError`` if no id_token is present in the session.
    """
    token = request.session['id_token']
    return json.dumps(token)
Exemple #54
0
def get_stats(username):
    """Build the full JSON stats payload for a GitHub user.

    Pulls event counts, weekly/daily schedules, language usage, vulgarity
    ranking, and connected/similar users out of Redis, then renders a
    human-readable HTML summary alongside the raw numbers.

    Returns a JSON string, or a ``(json, 404)`` tuple when no stats exist.
    """
    ghuser = username.lower()
    firstname = flask.request.args.get("firstname", username)

    eventscores = flask.g.redis.zrevrangebyscore("gh:user:{0}:event"
                                                 .format(ghuser), "+inf", 5,
                                                 0, 10, withscores=True)
    events = [e[0] for e in eventscores]
    evtcounts = [int(e[1]) for e in eventscores]

    # Get the user histogram.
    pipe = flask.g.redis.pipeline()

    # Get the time zone.
    pipe.get("gh:user:{0}:tz".format(ghuser))

    # Get the total number of events.
    pipe.zscore("gh:user", ghuser)

    # Get full commit schedule.
    pipe.hgetall("gh:user:{0}:date".format(ghuser))

    # Get the daily schedule for each type of event.
    [pipe.hgetall("gh:user:{0}:event:{1}:day".format(ghuser, e))
     for e in events]

    # Get the hourly schedule for each type of event.
    [pipe.hgetall("gh:user:{0}:event:{1}:hour".format(ghuser, e))
     for e in events]

    # Get the distribution of languages contributed to.
    pipe.zrevrange("gh:user:{0}:lang".format(ghuser), 0, -1, withscores=True)

    # Get the vulgarity (and vulgar rank) of the user.
    pipe.zrevrange("gh:user:{0}:curse".format(ghuser), 0, -1, withscores=True)
    pipe.zcount("gh:curse:user", 4, "+inf")
    pipe.zrevrank("gh:curse:user", ghuser)

    # Get connected users.
    pipe.zrevrangebyscore("gh:user:{0}:connection".format(ghuser), "+inf", 5,
                          0, 5)

    # Fetch the data from the database.
    raw = pipe.execute()

    # Get the general stats.
    tz = int(raw[0]) if raw[0] is not None and raw[0] != "None" else None
    total = int(raw[1]) if raw[1] is not None else 0

    if total == 0:
        return json.dumps({"message":
                           "Couldn't find any stats for this user."}), 404

    # Get the schedule histograms.
    # raw[0:3] were tz/total/date; the next m entries are per-event daily
    # schedules and the m after that are per-event hourly schedules.
    n, m = 3, len(events)
    week = zip(*[make_hist(d.iteritems(), 7)
                 for k, d in zip(events, raw[n:n + m])])
    # NOTE(review): the +8 presumably shifts stored hours into the user's
    # local timezone — confirm against how the hours were recorded.
    offset = tz + 8 if tz is not None else 0
    day = zip(*[make_hist(d.iteritems(), 24, offset=offset)
                for k, d in zip(events, raw[n + m:n + 2 * m])])

    # If there's no weekly schedule, we don't have enough info.
    if not len(week):
        return json.dumps({"message":
                           "Couldn't find any stats for this user."}), 404

    # Get the language proportions.
    n = n + 2 * m
    langs = raw[n]
    curses = raw[n + 1]

    # Parse the vulgarity factor.
    vulgarity = None
    try:
        vulgarity = int(100 * float(raw[n + 3]) / float(raw[n + 2])) + 1
    except:
        pass

    # Get the connected users.
    connections = [c for c in raw[n + 4] if c.lower() != ghuser]

    # Get language rank.
    langrank = None
    langname = None
    if len(langs):
        lang = langs[0][0]
        langname = language_users.get(lang, "{0} expert".format(lang))

        # Made up number. How many contributions count as enough? 20? Sure.
        pipe.zcount("gh:lang:{0}:user".format(lang), 50, "+inf")
        pipe.zrevrank("gh:lang:{0}:user".format(lang), ghuser)
        ltot, lrank = pipe.execute()

        # This user is in the top N percent of users of language "lang".
        try:
            langrank = (lang, int(100 * float(lrank) / float(ltot)) + 1)
        except:
            pass

    # Get neighbors.
    neighbors = get_neighbors(ghuser)

    # Figure out the representative weekly schedule.
    hacker_type = "a pretty inconsistent hacker"
    if len(week):
        mu = np.sum(week, axis=1)
        mu /= np.sum(mu)
        hacker_type = mean_desc[np.argmin(np.sum(np.abs(means - mu[None, :]),
                                                 axis=1))]
    # Build a human readable summary.
    summary = "<p>"
    if langname:
        adj = np.random.choice(["a high caliber", "a heavy hitting",
                                "a serious", "an awesome",
                                "a top notch", "a trend setting",
                                "a champion"])

        summary += ("{0} is {2} <a href=\"#languages\">{1}</a>"
                    .format(firstname, langname, adj))
        if langrank and langrank[1] < 50:
            summary += (" (one of the top {0}% most active {1} users)"
                        .format(langrank[1], langrank[0]))

        if len(events):
            if events[0] in evtactions:
                summary += (" who <a href=\"#events\">would rather be {0} "
                            "instead of pushing code</a>").format(
                                evtactions[events[0]])
            elif events[0] == "PushEvent":
                summary += " who <a href=\"#events\">loves pushing code</a>"

        summary += ". "

    summary += "{0} is <a href=\"#week\">{1}</a>".format(firstname,
                                                         hacker_type)
    if len(day):
        best_hour = np.argmax(np.sum(day, axis=1))
        if 0 <= best_hour < 7:
            tod = "wee hours"
        elif 7 <= best_hour < 12:
            tod = "morning"
        elif 12 <= best_hour < 18:
            tod = "mid-afternoon"
        elif 18 <= best_hour < 21:
            tod = "evening"
        else:
            tod = "late evening"
        summary += " who seems to <a href=\"#day\">work best in the {0}</a>" \
                   .format(tod)
    summary += ". "

    if vulgarity:
        if vulgarity < 50:
            summary += ("I hate to say it but {0} does seem&mdash;as one of "
                        "the top {1}% most vulgar users on GitHub&mdash;to "
                        "be <a href=\"#swearing\">a tad foul-mouthed</a> "
                        "(with a particular affinity "
                        "for filthy words like '{2}').").format(firstname,
                                                                vulgarity,
                                                                curses[0][0])
        elif vulgarity < 100:
            summary += ("I hate to say it but {0} is becoming&mdash;as one of "
                        "the top {1}% most vulgar users on GitHub&mdash;"
                        "<a href=\"#swearing\">a tad foul-mouthed</a> "
                        "(with a particular affinity "
                        "for filthy words like '{2}').").format(firstname,
                                                                vulgarity,
                                                                curses[0][0])

    summary += "</p>"

    # Add similar and connected users to summary.
    if len(week) and (len(neighbors) or len(connections)):
        summary += "<p>"

        if len(neighbors):
            ind = np.random.randint(len(neighbors))
            summary += ("{0}'s behavior is quite similar to <a "
                        "href=\"{2}\">{1}</a>'s but <span "
                        "class=\"comparison\" data-url=\"{3}\"></span>. ") \
                .format(firstname, neighbors[ind],
                        flask.url_for(".user", username=neighbors[ind]),
                        flask.url_for(".compare", username=ghuser,
                                      other=neighbors[ind]))

            if len(neighbors) == 2:
                ind = (ind + 1) % 2
                summary += ("<a href=\"{1}\">{0}</a>'s activity stream also "
                            "shows remarkable similarities to {2}'s "
                            "behavior. ").format(neighbors[ind],
                                                 flask.url_for(".user",
                                                               username=
                                                               neighbors[ind]),
                                                 firstname)

            elif len(neighbors) > 2:
                summary += ("It would also be impossible to look at {0}'s "
                            "activity stream and not compare it to those "
                            "of ").format(firstname)

                cus = []
                for i in range(len(neighbors)):
                    if i != ind:
                        cus.append("<a href=\"{1}\">{0}</a>"
                                   .format(neighbors[i],
                                           flask.url_for(".user",
                                                         username=
                                                         neighbors[i])))

                summary += ", ".join(cus[:-1])
                summary += " and " + cus[-1] + ". "

        if len(connections):
            ind = 0
            summary += ("It seems&mdash;from their activity streams&mdash;"
                        "that {0} and <a href=\"{2}\">{1}</a> are probably "
                        "friends or at least virtual friends. With this in "
                        "mind, it's worth noting that <span "
                        "class=\"comparison\" data-url=\"{3}\"></span>. ") \
                .format(firstname, connections[ind],
                        flask.url_for(".user", username=connections[ind]),
                        flask.url_for(".compare", username=ghuser,
                                      other=connections[ind]))

            if len(connections) > 2:
                summary += ("There is also an obvious connection between "
                            "{0} and ").format(firstname)

                cus = []
                for i in range(len(connections)):
                    if i != ind:
                        cus.append("<a href=\"{1}\">{0}</a>"
                                   .format(connections[i],
                                           flask.url_for(".user",
                                                         username=
                                                         connections[i])))

                summary += ", ".join(cus[:-1])
                summary += " and " + cus[-1] + ". "

        summary += "</p>"

    # Summary text for schedule graphs.
    sctxt = ""
    if len(events):
        sctxt = ("<p>The two following graphs show {0}'s average weekly and "
                 "daily schedules. These charts give significant insight "
                 "into {0}'s character as a developer. ").format(firstname)

        if len(events) == 1:
            sctxt += "All of the events in {0}'s activity stream are {1}. " \
                .format(firstname, evtverbs.get(events[0]))

        else:
            sctxt += ("The colors in the charts indicate the fraction of "
                      "events that are ")
            for i, e in enumerate(events):
                if i == len(events) - 1:
                    sctxt += "and "
                sctxt += ("<span class=\"evttype\" data-ind=\"{1}\">{0}"
                          "</span>").format(evtverbs.get(e), i)
                if i < len(events) - 1:
                    sctxt += ", "
            sctxt += ". "

        sctxt += """</p>
<div class="hist-block">
    <div id="week" class="hist"></div>
    <div id="day" class="hist"></div>
</div>
<p>"""

        sctxt += ("Based on this average weekly schedule, we can "
                  "describe {0} as "
                  "<strong>{1}</strong>. ").format(firstname, hacker_type)

        if len(day):
            if best_hour == 0:
                tm = "midnight"
            elif best_hour == 12:
                tm = "noon"
            else:
                tm = "{0}{1}".format(best_hour % 12,
                                     "am" if best_hour < 12 else "pm")
            sctxt += ("Since {0}'s most active time is around {1}, I would "
                      "conclude that {0} works best in the "
                      "<strong>{2}</strong>. ").format(firstname, tm, tod)
            sctxt += ("It is important to note that an attempt has been made "
                      "to show the daily schedule in the correct time zone "
                      "but this procedure is imperfect at best. ")
        sctxt += "</p>"

        if len(events) > 1:
            sctxt += ("<p>The following chart shows number of events of "
                      "different types in {0}'s activity stream. In the "
                      "time frame included in this analysis, {0}'s event "
                      "stream included "
                      "a total of {1} events and they are all ") \
                .format(firstname, sum(evtcounts))

            for i, e in enumerate(events):
                if i == len(events) - 1:
                    sctxt += "or "
                sctxt += ("<span class=\"evttype\" data-ind=\"{1}\">{0}"
                          "</span>").format(evtverbs.get(e), i)
                if i < len(events) - 1:
                    sctxt += ", "
            sctxt += ". "

            sctxt += """</p><div class="hist-block">
    <div id="events"></div>
</div>"""

        if langs and len(langs) > 1:
            sctxt += ("<p>{0} has contributed to repositories in {1} "
                      "different languages. ").format(firstname, len(langs))
            sctxt += ("In particular, {0} is a serious <strong>{1}</strong> "
                      "expert").format(firstname, langs[0][0])
            ls = [float(l[1]) for l in langs]
            if (ls[0] - ls[1]) / sum(ls) < 0.25:
                sctxt += (" with a surprisingly broad knowledge of {0} "
                          "as well").format(langs[1][0])
            sctxt += ". "
            sctxt += ("The following chart shows the number of contributions "
                      "made by {0} to repositories where the main "
                      "language is listed as ").format(firstname)
            for i, l in enumerate(langs):
                if i == len(langs) - 1:
                    sctxt += "or "
                sctxt += ("<span class=\"evttype\" data-ind=\"{1}\">{0}"
                          "</span>").format(l[0], i)
                if i < len(langs) - 1:
                    sctxt += ", "
            sctxt += "."
            sctxt += """</p><div class="hist-block">
    <div id="languages"></div>
</div>"""

        if langs and len(langs) == 1:
            sctxt += ("<p>{0} seems to speak only one programming language: "
                      "<strong>{1}</strong>. Maybe it's about time to branch "
                      "out a bit.</p>").format(firstname, langs[0][0])

    # Format the results.
    results = {"summary": summary}
    results["events"] = [" ".join(re.findall("([A-Z][a-z]+)", e))
                         for e in events]
    results["event_counts"] = evtcounts
    results["tz"] = tz
    results["total"] = total
    results["week"] = week
    results["hacker_type"] = hacker_type
    results["day"] = day
    results["schedule_text"] = sctxt
    # results["activity"] = raw[2].items()
    results["languages"] = langs
    results["lang_user"] = langname
    results["language_rank"] = langrank
    results["curses"] = curses
    results["vulgarity"] = vulgarity
    results["similar_users"] = neighbors
    results["connected_users"] = connections

    return json.dumps(results)
Exemple #55
0
def generate_cert_jtable(request, option):
    """
    Generate the jtable data for rendering in the list template.

    :param request: The request for this jtable.
    :type request: :class:`django.http.HttpRequest`
    :param option: Action to take.
    :type option: str of either 'jtlist', 'jtdelete', or 'inline'.
    :returns: :class:`django.http.HttpResponse`
    """

    obj_type = Certificate
    type_ = "certificate"
    # jtable rendering options are declared on the model's meta.
    mapper = obj_type._meta['jtable_opts']
    if option == "jtlist":
        # Sets display url
        details_url = mapper['details_url']
        details_url_key = mapper['details_url_key']
        fields = mapper['fields']
        response = jtable_ajax_list(obj_type,
                                    details_url,
                                    details_url_key,
                                    request,
                                    includes=fields)
        return HttpResponse(json.dumps(response, default=json_handler),
                            content_type="application/json")
    if option == "jtdelete":
        response = {"Result": "ERROR"}
        if jtable_ajax_delete(obj_type, request):
            response = {"Result": "OK"}
        return HttpResponse(json.dumps(response, default=json_handler),
                            content_type="application/json")
    # Options handed to the jtable widget (list/delete/search URLs, columns).
    jtopts = {
        'title':
        "Certificates",
        'default_sort':
        mapper['default_sort'],
        'listurl':
        reverse('crits.%ss.views.%ss_listing' % (type_, type_),
                args=('jtlist', )),
        'deleteurl':
        reverse('crits.%ss.views.%ss_listing' % (type_, type_),
                args=('jtdelete', )),
        'searchurl':
        reverse(mapper['searchurl']),
        'fields':
        mapper['jtopts_fields'],
        'hidden_fields':
        mapper['hidden_fields'],
        'linked_fields':
        mapper['linked_fields'],
        'details_link':
        mapper['details_link'],
        'no_sort':
        mapper['no_sort']
    }
    jtable = build_jtable(jtopts, request)
    # Toolbar buttons; values are JavaScript snippets evaluated client-side,
    # hence the nested quoting.
    jtable['toolbar'] = [
        {
            'tooltip': "'All Certificates'",
            'text': "'All'",
            'click':
            "function () {$('#certificate_listing').jtable('load', {'refresh': 'yes'});}",
            'cssClass': "'jtable-toolbar-center'",
        },
        {
            'tooltip': "'New Certificates'",
            'text': "'New'",
            'click':
            "function () {$('#certificate_listing').jtable('load', {'refresh': 'yes', 'status': 'New'});}",
            'cssClass': "'jtable-toolbar-center'",
        },
        {
            'tooltip': "'In Progress Certificates'",
            'text': "'In Progress'",
            'click':
            "function () {$('#certificate_listing').jtable('load', {'refresh': 'yes', 'status': 'In Progress'});}",
            'cssClass': "'jtable-toolbar-center'",
        },
        {
            'tooltip': "'Analyzed Certificates'",
            'text': "'Analyzed'",
            'click':
            "function () {$('#certificate_listing').jtable('load', {'refresh': 'yes', 'status': 'Analyzed'});}",
            'cssClass': "'jtable-toolbar-center'",
        },
        {
            'tooltip': "'Deprecated Certificates'",
            'text': "'Deprecated'",
            'click':
            "function () {$('#certificate_listing').jtable('load', {'refresh': 'yes', 'status': 'Deprecated'});}",
            'cssClass': "'jtable-toolbar-center'",
        },
        {
            'tooltip': "'Add Certificate'",
            'text': "'Add Certificate'",
            'click': "function () {$('#new-certificate').click()}",
        },
    ]
    # 'inline' embeds the table in an existing page; anything else renders
    # the full listing template.
    if option == "inline":
        return render_to_response(
            "jtable.html", {
                'jtable': jtable,
                'jtid': '%s_listing' % type_,
                'button': '%ss_tab' % type_
            }, RequestContext(request))
    else:
        return render_to_response("%s_listing.html" % type_, {
            'jtable': jtable,
            'jtid': '%s_listing' % type_
        }, RequestContext(request))
Exemple #56
0
def compare(username, other):
    """
    Return a human-readable distinction between 2 GitHub users.

    Fetches both users' aggregated stats from redis in one pipeline, builds a
    list of ``(description, relative probability)`` differences across event
    types, languages, vulgarity and weekly schedule, then either returns the
    full normalized list as JSON (``?full`` query arg) or one randomly chosen
    description weighted by those probabilities.

    Returns a ``(json, 404)`` tuple when *username* has no stats, and a plain
    apology string when *other* has none.
    """
    # NOTE(review): assumes the redis client decodes responses to str
    # (decode_responses=True); verify, otherwise keys below are bytes.
    user1, user2 = username.lower(), other.lower()

    pipe = flask.g.redis.pipeline()

    pipe.zscore("gh:user", user1)
    pipe.zscore("gh:user", user2)

    pipe.zrevrange("gh:user:{0}:event".format(user1), 0, -1, withscores=True)
    pipe.zrevrange("gh:user:{0}:event".format(user2), 0, -1, withscores=True)

    pipe.zrevrange("gh:user:{0}:lang".format(user1), 0, -1, withscores=True)
    pipe.zrevrange("gh:user:{0}:lang".format(user2), 0, -1, withscores=True)

    pipe.zscore("gh:curse:user", user1)
    pipe.zscore("gh:curse:user", user2)

    pipe.hgetall("gh:user:{0}:day".format(user1))
    pipe.hgetall("gh:user:{0}:day".format(user2))

    raw = pipe.execute()

    total1 = float(raw[0]) if raw[0] is not None else 0
    total2 = float(raw[1]) if raw[1] is not None else 0

    if not total1:
        return json.dumps({"message":
                           "No stats for user '{0}'".format(username)}), 404

    if not total2:
        return "we don't have any statistics about {0}".format(other)

    # Compare the fractional event types.
    evts1 = dict(raw[2])
    evts2 = dict(raw[3])
    diffs = []
    # BUG FIX: dict.iteritems() is Python 2 only; this file is Python 3.
    for e, desc in evttypes.items():
        if e in evts1 and e in evts2:
            d = float(evts2[e]) / total2 / float(evts1[e]) * total1
            if d != 1:
                more = "more" if d > 1 else "less"
                if d > 1:
                    d = 1.0 / d
                diffs.append((desc.format(more=more, user=user2), d * d))

    # Compare language usage.
    langs1 = dict(raw[4])
    langs2 = dict(raw[5])
    for l in set(langs1.keys()) | set(langs2.keys()):
        n = float(langs1.get(l, 0)) / total1
        d = float(langs2.get(l, 0)) / total2
        if n != d and d > 0:
            if n > 0:
                d = d / n
            else:
                d = 1.0 / d
            more = "more" if d > 1 else "less"
            if l in languages:
                desc = languages[l]
            else:
                desc = "{{user}} is {{more}} of a {0} aficionado".format(l)
            if d > 1:
                d = 1.0 / d
            diffs.append((desc.format(more=more, user=user2), d * d))

    # Number of languages.
    nl1, nl2 = len(raw[4]), len(raw[5])
    if nl1 and nl2:
        desc = "{user} speaks {more} languages"
        if nl1 > nl2:
            diffs.append((desc.format(user=user2, more="fewer"),
                          nl2 * nl2 / nl1 / nl1))
        else:
            diffs.append((desc.format(user=user2, more="more"),
                          nl1 * nl1 / nl2 / nl2))

    # Compare the vulgarity.
    nc1 = float(raw[6]) if raw[6] else 0
    nc2 = float(raw[7]) if raw[7] else 0
    # BUG FIX: `nc1 or nc2 and nc1 != nc2` parsed as `nc1 or (nc2 and ...)`,
    # so equal non-zero counts still produced a bogus "more/less foul" diff.
    if (nc1 or nc2) and nc1 != nc2:
        if nc1 > nc2:
            diffs.append(("{0} is less foul mouthed".format(user2),
                          (nc2 * nc2 + 1) / (nc1 * nc1 + 1)))
        else:
            diffs.append(("{0} is more foul mouthed".format(user2),
                          (nc1 * nc1 + 1) / (nc2 * nc2 + 1)))

    # Compare the average weekly schedules.
    # BUG FIX: map() is a one-shot iterator in Python 3 — the first sum()
    # consumed it and the variance pass below then saw an empty sequence.
    week1 = [int(v) for v in raw[8].values()]
    week2 = [int(v) for v in raw[9].values()]
    mu1, mu2 = sum(week1) / 7.0, sum(week2) / 7.0
    # Guard: a user with no recorded per-day activity has mu == 0, which
    # previously raised ZeroDivisionError.
    if mu1 and mu2:
        var1 = np.sqrt(sum((v - mu1) ** 2 for v in week1) / 7.0) / mu1
        var2 = np.sqrt(sum((v - mu2) ** 2 for v in week2) / 7.0) / mu2
        # BUG FIX: same and/or precedence problem as the vulgarity check.
        if (var1 or var2) and var1 != var2:
            if var1 > var2:
                diffs.append(("{0} has a more consistent weekly schedule"
                              .format(user2), var2 / var1))
            else:
                diffs.append(("{0} has a less consistent weekly schedule"
                              .format(user2), var1 / var2))

    # Guard: with no comparable dimensions the normalization below would
    # divide by zero / choose from an empty list.
    if not diffs:
        return "we can't tell {0} and {1} apart".format(username, other)

    # Compute the relative probabilities of the comparisons and normalize.
    # BUG FIX: ps must be a list — it is consumed twice (sum + normalize).
    ps = [v[1] for v in diffs]
    norm = sum(ps)

    # Return the full list?
    if flask.request.args.get("full") is not None:
        scored = sorted(zip([d[0] for d in diffs], [p / norm for p in ps]),
                        key=lambda v: v[1], reverse=True)
        return json.dumps(scored)

    # Choose a random description weighted by the probabilities.
    return np.random.choice([d[0] for d in diffs], p=[p / norm for p in ps])
Exemple #57
0
def parse_eml(eml):
    """
    Extract URLs and attachment SHA-256 hashes from a parsed email message
    and look each artifact up on VirusTotal, caching responses in the local
    ``Artifact`` table so repeat lookups skip the API.

    :param eml: an ``email.message.Message``-like object (supports ``walk()``).
    :returns: list of VirusTotal JSON report dicts, one per artifact.
    """
    hashes = []
    urls = []
    responses = []

    # Hoisted out of the loop: one extractor serves every text part.
    extractor = URLExtract()

    for part in eml.walk():
        ctype = part.get_content_type()
        # BUG FIX: the original compared the *method* part.get_content_type
        # (missing call parentheses) to "text/html" — always False — and the
        # `A and B or C` precedence let html attachments slip through.
        if part.get_content_disposition() != "attachment" and \
                ctype in ("text/plain", "text/html"):
            text = str(part.get_payload(decode=True)).replace("\\n", " ")
            # BUG FIX: accumulate URLs across all text parts; the original
            # overwrote the list on every part, keeping only the last one.
            for url in extractor.find_urls(text):
                if url not in urls:
                    urls.append(url)

        if part.get_content_disposition() == "attachment":
            attach = base64.b64decode(part.get_payload())
            hashes.append(hashlib.sha256(attach).hexdigest())

    print(f"hashes: {hashes}")
    print(f"urls: {urls}")

    for shasum in hashes:
        artifact = Artifact.query.filter_by(handle=shasum).first()
        if artifact:
            print(f"{shasum} already exists in DB")
            responses.append(json.loads(artifact.response))
        else:
            params = {'apikey': vtapi, 'resource': shasum}
            headers = {"Accept-Encoding": "gzip, deflate"}
            response = requests.get(
                'https://www.virustotal.com/vtapi/v2/file/report',
                params=params,
                headers=headers)
            json_response = response.json()

            # Cache the raw report so the next lookup hits the DB instead.
            artifact = Artifact(handle=shasum,
                                response=json.dumps(json_response))
            db.session.add(artifact)
            db.session.commit()

            responses.append(json_response)

    for url in urls:
        artifact = Artifact.query.filter_by(handle=url).first()
        if artifact:
            print(f"{url} already exists in DB")
            responses.append(json.loads(artifact.response))
        else:
            headers = {
                "Accept-Encoding": "gzip, deflate",
            }
            params = {'apikey': vtapi, 'resource': url}
            response = requests.post(
                'https://www.virustotal.com/vtapi/v2/url/report',
                params=params,
                headers=headers)
            json_response = response.json()

            artifact = Artifact(handle=url, response=json.dumps(json_response))
            db.session.add(artifact)
            db.session.commit()

            responses.append(json_response)

    return responses
Exemple #58
0
def test_long_short_and_non_numeric_tac(flask_app, api_version):
    """Test Depot ID 96788/5.

    Verify that TAC API returns a 400 status for short and non-numeric,
    shorter and longer tacs.
    """
    # The three invalid-input classes under test; each drove a near-identical
    # copy-pasted stanza before, now covered by data-driven loops.
    non_numeric_tacs = ['abc', '1abc', 'abcdefgh', '1234ABCD', '12345678ABSDEF']
    too_short_tacs = ['1', '00', '1234567']
    too_long_tacs = ['123456789', '012345678', '0123456780']
    invalid_batches = [non_numeric_tacs, too_short_tacs, too_long_tacs]

    if api_version == 'v1':
        # v1 exposes a single GET endpoint and an error message body.
        for t in non_numeric_tacs + too_short_tacs + too_long_tacs:
            rv = flask_app.get(
                url_for('{0}.tac_api'.format(api_version), tac=t))
            assert rv.status_code == 400
            assert b'Bad TAC format' in rv.data
    else:  # api version 2
        headers = {'content-type': 'application/json'}
        for batch in invalid_batches:
            # Each bad TAC must be rejected by the single-TAC GET api...
            for t in batch:
                rv = flask_app.get(
                    url_for('{0}.tac_get_api'.format(api_version), tac=t))
                assert rv.status_code == 400

            # ...and the whole batch must be rejected by the bulk POST api.
            rv = flask_app.post(url_for('{0}.tac_post_api'.format(api_version)),
                                data=json.dumps({'tacs': batch}),
                                headers=headers)
            assert rv.status_code == 400
 def set_json_text(self):
     """Render the current parameters as pretty-printed JSON into the widget."""
     # indent=4 keeps the dump human-readable in the text view.
     self.set_text(json.dumps(self.current_parameters, indent=4))
def get_access_token_json(request):
    """Serialize the access token stored in the request's session to JSON."""
    token = request.session['access_token']
    return json.dumps(token)