Example #1
def engine():
    s = "mysql+pymysql://%s:%s@%s/%s" % (
        settings()['mysql_user'], settings()['mysql_password'],
        settings()['mysql_host'], settings()['mysql_database'])
    engine = create_engine(s)

    return engine
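A minimal, self-contained usage sketch of the pattern above follows. The settings() helper and its values here are placeholders invented for illustration (the original project loads them from its own configuration), and a reachable MySQL server plus the PyMySQL driver are assumed.

from sqlalchemy import create_engine, text

def settings():
    # Placeholder values for illustration only.
    return {
        'mysql_user': 'user',
        'mysql_password': 'secret',
        'mysql_host': 'localhost',
        'mysql_database': 'example',
    }

def engine():
    s = "mysql+pymysql://%s:%s@%s/%s" % (
        settings()['mysql_user'], settings()['mysql_password'],
        settings()['mysql_host'], settings()['mysql_database'])
    return create_engine(s)

if __name__ == '__main__':
    # Open a connection and run a trivial query to confirm the settings work.
    with engine().connect() as conn:
        print(conn.execute(text("SELECT 1")).scalar())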
Example #2
File: __main__.py Project: srchunter/s3t
def main():
    args = parse_args()
    if args.settings:
        config.settings()
        exit(0)

    if args.list:
        s3operation.list_keys(args.bucket,
                              args.list,
                              all=args.all,
                              access_key_id=args.access_key_id,
                              access_key=args.access_key,
                              showSize=(not args.nosize))
    elif args.download:
        s3operation.download(args.bucket,
                             args.download,
                             filename=args.key,
                             showProgress=(not args.nosize))
    elif args.copy:
        s3operation.copy(args.bucket,
                         args.copy,
                         args.target,
                         target_key=args.key)
    elif args.upload:
        s3operation.upload(args.bucket, args.upload, args.key)
    elif args.remove:
        s3operation.remove(args.bucket, args.remove)
Example #3
    def test_cron_cleanup_trigger_expired(self):
        # Asserts that old entities are deleted through a task queue.

        # Removes the jitter.
        def _expiration_jitter(now, expiration):
            out = now + datetime.timedelta(seconds=expiration)
            return out, out

        self.mock(model, 'expiration_jitter', _expiration_jitter)
        now = self.mock_now(datetime.datetime(2020, 1, 2, 3, 4, 5), 0)
        request = self.store_request('sha1-raw', 'Foo')
        self.call_api('store_inline', message_to_dict(request))
        self.assertEqual(1, model.ContentEntry.query().count())

        self.mock_now(now, config.settings().default_expiration)
        self.app.get('/internal/cron/cleanup/trigger/expired',
                     headers={'X-AppEngine-Cron': 'true'})
        self.assertEqual(1, model.ContentEntry.query().count())
        self.assertEqual(0, self.execute_tasks())

        # Try again, one second later.
        self.mock_now(now, config.settings().default_expiration + 1)
        self.app.get('/internal/cron/cleanup/trigger/expired',
                     headers={'X-AppEngine-Cron': 'true'})
        self.assertEqual(1, model.ContentEntry.query().count())

        # The query task queue triggers deletion task queues.
        self.assertEqual(2, self.execute_tasks())
        # Boom it's gone.
        self.assertEqual(0, model.ContentEntry.query().count())
Example #4
File: api.py Project: gitaway/arvados
def api(version=None):
    global services

    if 'ARVADOS_DEBUG' in config.settings():
        logging.basicConfig(level=logging.DEBUG)

    if not services.get(version):
        apiVersion = version
        if not version:
            apiVersion = 'v1'
            logging.info("Using default API version. " +
                         "Call arvados.api('%s') instead." %
                         apiVersion)
        if 'ARVADOS_API_HOST' not in config.settings():
            raise Exception("ARVADOS_API_HOST is not set. Aborting.")
        url = ('https://%s/discovery/v1/apis/{api}/{apiVersion}/rest' %
               config.get('ARVADOS_API_HOST'))
        credentials = CredentialsFromEnv()

        # Use system's CA certificates (if we find them) instead of httplib2's
        ca_certs = '/etc/ssl/certs/ca-certificates.crt'
        if not os.path.exists(ca_certs):
            ca_certs = None             # use httplib2 default

        http = httplib2.Http(ca_certs=ca_certs,
                             cache=http_cache('discovery'))
        http = credentials.authorize(http)
        if re.match(r'(?i)^(true|1|yes)$',
                    config.get('ARVADOS_API_HOST_INSECURE', 'no')):
            http.disable_ssl_certificate_validation = True
        services[version] = apiclient.discovery.build(
            'arvados', apiVersion, http=http, discoveryServiceUrl=url)
    return services[version]
Example #5
File: api.py Project: ntijanic/arvados
def api(version=None, cache=True):
    global services

    if "ARVADOS_DEBUG" in config.settings():
        logging.basicConfig(level=logging.DEBUG)

    if not cache or not services.get(version):
        apiVersion = version
        if not version:
            apiVersion = "v1"
            logging.info("Using default API version. " + "Call arvados.api('%s') instead." % apiVersion)
        if "ARVADOS_API_HOST" not in config.settings():
            raise Exception("ARVADOS_API_HOST is not set. Aborting.")
        url = "https://%s/discovery/v1/apis/{api}/{apiVersion}/rest" % config.get("ARVADOS_API_HOST")
        credentials = CredentialsFromEnv()

        # Use system's CA certificates (if we find them) instead of httplib2's
        ca_certs = "/etc/ssl/certs/ca-certificates.crt"
        if not os.path.exists(ca_certs):
            ca_certs = None  # use httplib2 default

        http = httplib2.Http(ca_certs=ca_certs, cache=(http_cache("discovery") if cache else None))
        http = credentials.authorize(http)
        if re.match(r"(?i)^(true|1|yes)$", config.get("ARVADOS_API_HOST_INSECURE", "no")):
            http.disable_ssl_certificate_validation = True
        services[version] = apiclient.discovery.build("arvados", apiVersion, http=http, discoveryServiceUrl=url)
        http.cache = None
    return services[version]
Example #6
    def setUp(self):
        """Creates a new app instance for every test case."""
        super(MainTest, self).setUp()
        self.testbed.init_user_stub()

        self.source_ip = '192.168.0.1'
        self.app = webtest.TestApp(
            handlers_frontend.create_application(debug=True),
            extra_environ={'REMOTE_ADDR': self.source_ip})

        self.auth_app = webtest.TestApp(
            auth.create_wsgi_application(debug=True),
            extra_environ={
                'REMOTE_ADDR': self.source_ip,
                'SERVER_SOFTWARE': os.environ['SERVER_SOFTWARE'],
            })

        full_access_group = config.settings().auth.full_access_group
        readonly_access_group = config.settings().auth.readonly_access_group

        auth.bootstrap_group(
            auth.ADMIN_GROUP,
            [auth.Identity(auth.IDENTITY_USER, '*****@*****.**')])
        auth.bootstrap_group(
            readonly_access_group,
            [auth.Identity(auth.IDENTITY_USER, '*****@*****.**')])
        auth.bootstrap_group(
            full_access_group,
            [auth.Identity(auth.IDENTITY_USER, '*****@*****.**')])
        self.set_as_anonymous()
Example #7
def api(version=None):
    global services

    if 'ARVADOS_DEBUG' in config.settings():
        logging.basicConfig(level=logging.DEBUG)

    if not services.get(version):
        apiVersion = version
        if not version:
            apiVersion = 'v1'
            logging.info("Using default API version. " +
                         "Call arvados.api('%s') instead." % apiVersion)
        if 'ARVADOS_API_HOST' not in config.settings():
            raise Exception("ARVADOS_API_HOST is not set. Aborting.")
        url = ('https://%s/discovery/v1/apis/{api}/{apiVersion}/rest' %
               config.get('ARVADOS_API_HOST'))
        credentials = CredentialsFromEnv()

        # Use system's CA certificates (if we find them) instead of httplib2's
        ca_certs = '/etc/ssl/certs/ca-certificates.crt'
        if not os.path.exists(ca_certs):
            ca_certs = None  # use httplib2 default

        http = httplib2.Http(ca_certs=ca_certs, cache=http_cache('discovery'))
        http = credentials.authorize(http)
        if re.match(r'(?i)^(true|1|yes)$',
                    config.get('ARVADOS_API_HOST_INSECURE', 'no')):
            http.disable_ssl_certificate_validation = True
        services[version] = apiclient.discovery.build('arvados',
                                                      apiVersion,
                                                      http=http,
                                                      discoveryServiceUrl=url)
    return services[version]
Example #8
def get_swift_config():
    """
    Get a SwiftConfig instance per application settings
    """
    auth_token = get_auth_token()
    container = settings('swift_container')
    swift_url = settings('swift_url')
    swift_cfg = SwiftConfig(auth_token, swift_url, container)
    return swift_cfg
Example #9
    def scrape(self):
        time.sleep(5)
        if settings()['screenshot']:
            self.driver.save_screenshot('columns.png')

        try:
            # Note: building the expected condition alone never raises; it has
            # to be passed to a WebDriverWait (selenium.webdriver.support.ui)
            # for TimeoutException to be possible. The 10-second timeout is an
            # arbitrary illustrative choice.
            WebDriverWait(self.driver, 10).until(
                EC.presence_of_element_located((By.CLASS_NAME, 'column')))

        except TimeoutException:
            self.logger.warning("Timed out waiting for page to load")
            return

        self.logger.info('scraping')

        columns = self.driver.find_elements_by_class_name('column')

        for column in columns:
            title_container = column.find_element_by_xpath(
                ".//div[@data-testid='filterMessage']")
            trends = column.find_elements_by_xpath(
                ".//div[@data-testid='trend']")

            df = pd.DataFrame(index=np.arange(0, len(trends)),
                              columns=[
                                  'location', 'hashtag', 'tweets_counter',
                                  'position', 'trend_date'
                              ])

            place = clean_title(title_container.text)

            self.logger.info(title_container.text)

            for key, trend in enumerate(trends):
                uid = uuid.uuid4()
                ht_container = trend.find_element_by_xpath(
                    ".//a[@data-testid='trendLink']")

                try:
                    count_container = trend.find_element_by_xpath(
                        ".//span[@data-testid='trendDescription']")
                except NoSuchElementException:
                    count_container = None

                count = get_number_tweets(
                    count_container.text) if count_container is not None else 0

                trend_date = get_utc(settings()['places'][place]['tz'])

                df.loc[key] = [
                    place, ht_container.text, count, key + 1, trend_date
                ]

                self.enqueue(uid.hex)

            self.logger.info('Saving %s to db' % place)
            self.save(df)
Example #10
 def run(self):
     self.login()
     start_time = time.time()
     while 1:
         try:
             self.scrape()
         except Exception as e:
             self.logger.error('scrape() error: %s' % str(e))
         self.rmq.sleep(settings()['interval'] - (
             (time.time() - start_time) % settings()['interval']))
Example #11
File: inbox.py Project: cobsec/pickup-stix
def inbox(stix_package):
    taxii_url = settings('inbox')['taxii_server']
    username = settings('inbox')['taxii_user']
    password = settings('inbox')['taxii_pass']
    user_pwd = username + ":" + password

    xmlstart = """<?xml version="1.0" encoding="UTF-8" ?>"""
    boilerplate = """xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:taxii_11="http://taxii.mitre.org/messages/taxii_xml_binding-1.1" xsi:schemaLocation="http://taxii.mitre.org/messages/taxii_xml_binding-1.1 http://taxii.mitre.org/messages/taxii_xml_binding-1.1" """
    message_id = str(random.randint(345271,9999999999))

    xml_inbox = xmlstart + """
<taxii_11:Inbox_Message {{boilerplate}} message_id="{{message_id}}">
    <taxii_11:Content_Block>
        <taxii_11:Content_Binding binding_id="{{content_binding}}" />
        <taxii_11:Content>
        {{content_data}}
        </taxii_11:Content>
    </taxii_11:Content_Block>
</taxii_11:Inbox_Message>"""

    xml = xml_inbox.replace('{{boilerplate}}',boilerplate) \
                   .replace('{{message_id}}',message_id) \
                   .replace('{{content_binding}}','urn:stix.mitre.org:xml:1.1.1') \
                   .replace('{{content_data}}',stix_package.to_xml())
    #print xml
    headers = [
        "Content-Type: application/xml",
        "Content-Length: " + str(len(xml)),
        "User-Agent: TAXII Client Application",
        "Accept: application/xml",
        "X-TAXII-Accept: urn:taxii.mitre.org:message:xml:1.1",
        "X-TAXII-Content-Type: urn:taxii.mitre.org:message:xml:1.1",
        "X-TAXII-Protocol: urn:taxii.mitre.org:protocol:https:1.0",
    ]

    buf = StringIO.StringIO()

    conn = pycurl.Curl()
    conn.setopt(pycurl.URL, taxii_url)
    conn.setopt(pycurl.USERPWD, user_pwd)
    conn.setopt(pycurl.HTTPHEADER, headers)
    conn.setopt(pycurl.POST, 1)
    conn.setopt(pycurl.TIMEOUT, 999999)
    conn.setopt(pycurl.WRITEFUNCTION, buf.write)
    conn.setopt(pycurl.POSTFIELDS, xml)
    conn.perform()

    hp = HTMLParser.HTMLParser()
    result = hp.unescape(buf.getvalue()).encode('ascii', 'ignore')
    print result
    root = ET.fromstring(result)
    status = root.attrib['status_type']

    return status
Example #12
 def test_config(self):
     self.set_as_admin()
     resp = self.app.get('/restricted/config')
     # TODO(maruel): Use beautifulsoup?
     priv_key = 'test private key'
     params = {
         'gs_private_key': priv_key,
         'keyid': str(config.settings_info()['cfg'].key.integer_id()),
         'xsrf_token': self.get_xsrf_token(),
     }
     self.assertEqual('', config.settings().gs_private_key)
     resp = self.app.post('/restricted/config', params)
     self.assertNotIn('Update conflict', resp)
     self.assertEqual(priv_key, config.settings().gs_private_key)
Example #13
 def test_config_conflict(self):
     self.set_as_admin()
     resp = self.app.get('/restricted/config')
     # TODO(maruel): Use beautifulsoup?
     params = {
         'google_analytics': 'foobar',
         'keyid': str(config.settings().key.integer_id() - 1),
         'reusable_task_age_secs': 30,
         'xsrf_token': self.get_xsrf_token(),
     }
     self.assertEqual('', config.settings().google_analytics)
     resp = self.app.post('/restricted/config', params)
     self.assertIn('Update conflict', resp)
     self.assertEqual('', config.settings().google_analytics)
Example #14
 def test_config_conflict(self):
   self.set_as_admin()
   resp = self.app_frontend.get('/restricted/config')
   # TODO(maruel): Use beautifulsoup?
   params = {
     'google_analytics': 'foobar',
     'keyid': str(config.settings().key.integer_id() - 1),
     'reusable_task_age_secs': 30,
     'xsrf_token': self.get_xsrf_token(),
   }
   self.assertEqual('', config.settings().google_analytics)
   resp = self.app_frontend.post('/restricted/config', params)
   self.assertIn('Update conflict', resp)
   self.assertEqual('', config.settings().google_analytics)
Example #15
 def test_config(self):
     self.set_as_admin()
     resp = self.app_frontend.get('/restricted/config')
     # TODO(maruel): Use beautifulsoup?
     params = {
         'default_expiration': 123456,
         'google_analytics': 'foobar',
         'keyid': str(config.settings().key.integer_id()),
         'xsrf_token': self.get_xsrf_token(),
     }
     self.assertEqual('', config.settings().google_analytics)
     resp = self.app_frontend.post('/restricted/config', params)
     self.assertNotIn('Update conflict', resp)
     self.assertEqual('foobar', config.settings().google_analytics)
     self.assertIn('foobar', self.app_frontend.get('/').body)
Example #16
 def test_config(self):
   self.set_as_admin()
   resp = self.app_frontend.get('/restricted/config')
   # TODO(maruel): Use beautifulsoup?
   params = {
     'default_expiration': 123456,
     'google_analytics': 'foobar',
     'keyid': str(config.settings().key.integer_id()),
     'xsrf_token': self.get_xsrf_token(),
   }
   self.assertEqual('', config.settings().google_analytics)
   resp = self.app_frontend.post('/restricted/config', params)
   self.assertNotIn('Update conflict', resp)
   self.assertEqual('foobar', config.settings().google_analytics)
   self.assertIn('foobar', self.app_frontend.get('/').body)
Example #17
File: model.py Project: misscache/luci-py
def entry_key_from_id(key_id):
  """Returns the ndb.Key for the key_id."""
  hash_key = key_id.rsplit('/', 1)[1]
  N = config.settings().sharding_letters
  return ndb.Key(
      ContentEntry, key_id,
      parent=datastore_utils.shard_key(hash_key, N, 'ContentShard'))
Example #18
  def post(self, namespace, timestamp):
    digests = []
    now = utils.timestamp_to_datetime(long(timestamp))
    expiration = config.settings().default_expiration
    try:
      digests = payload_to_hashes(self, namespace)
      # Requests all the entities at once.
      futures = ndb.get_multi_async(
          model.entry_key(namespace, binascii.hexlify(d)) for d in digests)

      to_save = []
      while futures:
        # Return opportunistically the first entity that can be retrieved.
        future = ndb.Future.wait_any(futures)
        futures.remove(future)
        item = future.get_result()
        if item and item.next_tag_ts < now:
          # Update the timestamp. Add a bit of pseudo randomness.
          item.expiration_ts, item.next_tag_ts = model.expiration_jitter(
              now, expiration)
          to_save.append(item)
      if to_save:
        ndb.put_multi(to_save)
      logging.info(
          'Timestamped %d entries out of %s', len(to_save), len(digests))
    except Exception as e:
      logging.error('Failed to stamp entries: %s\n%d entries', e, len(digests))
      raise
Example #19
def render(name, params=None):
    """Shorthand to render a template."""
    out = {
        'google_analytics': config.settings().google_analytics,
    }
    out.update(params or {})
    return template.render(name, out)
Example #20
def api_from_config(version=None, apiconfig=None, **kwargs):
    """Return an apiclient Resources object enabling access to an Arvados server
    instance.

    :version:
      A string naming the version of the Arvados REST API to use (for
      example, 'v1').

    :apiconfig:
      If provided, this should be a dict-like object (must support the get()
      method) with entries for ARVADOS_API_HOST, ARVADOS_API_TOKEN, and
      optionally ARVADOS_API_HOST_INSECURE.  If not provided, use
      arvados.config (which gets these parameters from the environment by
      default.)

    Other keyword arguments such as `cache` will be passed along to `api()`.

    """
    # Load from user configuration or environment
    if apiconfig is None:
        apiconfig = config.settings()

    for x in ['ARVADOS_API_HOST', 'ARVADOS_API_TOKEN']:
        if x not in apiconfig:
            raise ValueError("%s is not set. Aborting." % x)
    host = apiconfig.get('ARVADOS_API_HOST')
    token = apiconfig.get('ARVADOS_API_TOKEN')
    insecure = config.flag_is_true('ARVADOS_API_HOST_INSECURE', apiconfig)

    return api(version=version,
               host=host,
               token=token,
               insecure=insecure,
               **kwargs)
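A hedged usage sketch for api_from_config() above: it passes an explicit apiconfig dictionary instead of relying on the environment. The host and token values are placeholders, and the arvados Python SDK plus a reachable Arvados API server are assumed.

import arvados

# Placeholder credentials; real values normally come from the environment via
# arvados.config.settings().
apiconfig = {
    'ARVADOS_API_HOST': 'arvados.example.com',
    'ARVADOS_API_TOKEN': 'replace-with-a-real-token',
    'ARVADOS_API_HOST_INSECURE': 'no',
}

client = arvados.api_from_config(version='v1', apiconfig=apiconfig)
print(client.users().current().execute()['uuid'])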
Example #21
File: api.py Project: WangZhenfei/arvados
def api_from_config(version=None, apiconfig=None, **kwargs):
    """Return an apiclient Resources object enabling access to an Arvados server
    instance.

    :version:
      A string naming the version of the Arvados REST API to use (for
      example, 'v1').

    :apiconfig:
      If provided, this should be a dict-like object (must support the get()
      method) with entries for ARVADOS_API_HOST, ARVADOS_API_TOKEN, and
      optionally ARVADOS_API_HOST_INSECURE.  If not provided, use
      arvados.config (which gets these parameters from the environment by
      default.)

    Other keyword arguments such as `cache` will be passed along to `api()`.

    """
    # Load from user configuration or environment
    if apiconfig is None:
        apiconfig = config.settings()

    for x in ['ARVADOS_API_HOST', 'ARVADOS_API_TOKEN']:
        if x not in apiconfig:
            raise ValueError("%s is not set. Aborting." % x)
    host = apiconfig.get('ARVADOS_API_HOST')
    token = apiconfig.get('ARVADOS_API_TOKEN')
    insecure = config.flag_is_true('ARVADOS_API_HOST_INSECURE', apiconfig)

    return api(version=version, host=host, token=token, insecure=insecure, **kwargs)
Example #22
def getCurrentReadings():
    _settings = settings()
    records = []
    for room in _settings:
        sensorLookups = {}
        for sensor in room['sensors']:
            sensorLookups[str(sensor['sensor_id'])] = sensor['sensor_name']
        currentClause = getCurrentReadingClauses(room['device_name'])
        for row in con.execute(currentClause):
            room, data, date_time = row
            currData = {}
            for dm in data:
                if currData.get(
                        str(dm['sensor_id'])
                ) is None:  # TODO: str() is required due to data inconsistency
                    currData[str(dm['sensor_id'])] = {
                        'name': sensorLookups[str(dm['sensor_id'])]
                    }
                currData[str(dm['sensor_id'])][dm['type']] = dm['value']
            records.append({
                "room_name": room,
                "readings": currData,
                "date_time": date_time
            })
    returnData = {'data': records}
    return jsonify(returnData)  # TODO: Convert date_time in DB to UTC timezone
Example #23
def query(value):
  try:
    query_type = settings('kb')['kb_type']
  except KeyError:
    print "[cobstix2] Could not read kb_type from kb settings in config.ini"
    sys.exit(0)

  if query_type == 'elk':
    _index = USER
    endpoint = ELK + '%s/_search' % _index
    payload = '{"query":{"query_string":{"query": "%s"}}}' % value
    try:
      r = requests.post(endpoint, payload)
      json_content = r.json()
    except requests.exceptions.RequestException as e:
      print e
      return False
    try:
      hit_list = json_content['hits']['hits']
      obj_list = []
      for hit in hit_list:
        new_obj = dict_to_obj(hit["_source"])
        obj_list.append(new_obj)
      return obj_list
    except KeyError:
      return False
  else:
    return False
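For context, a minimal sketch of the same query_string search issued directly with requests. The endpoint, index name, and query value are placeholders; the original code builds them from ELK, USER, and the kb settings in config.ini.

import requests

# Placeholder endpoint and query; illustration only.
endpoint = 'http://localhost:9200/my-index/_search'
payload = '{"query":{"query_string":{"query": "example"}}}'
r = requests.post(endpoint, payload, headers={'Content-Type': 'application/json'})
print(r.json().get('hits', {}).get('hits', []))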
Example #24
 def setUp(self):
     super(IsolateServiceTest, self).setUp()
     self.testbed.init_blobstore_stub()
     self.testbed.init_urlfetch_stub()
     # It seems like there is a singleton state preserved across the tests,
     # making it hard to re-run the complete setUp procedure. Therefore we pre-
     # register all the possible identities being used in the tests.
     all_authed_ids = [
         auth.Identity(auth.IDENTITY_USER, '*****@*****.**'),
         auth.Identity(auth.IDENTITY_USER,
                       '*****@*****.**'),
         auth.Identity(auth.IDENTITY_SERVICE, 'adminapp'),
     ]
     admin = all_authed_ids[0]
     full_access_group = config.settings().auth.full_access_group
     auth.bootstrap_group(full_access_group, all_authed_ids)
     auth_testing.mock_get_current_identity(self, admin)
     version = utils.get_app_version()
     self.mock(utils, 'get_task_queue_host', lambda: version)
     self.testbed.setup_env(current_version_id='testbed.version')
     self.source_ip = '127.0.0.1'
      # It is needed solely for self.execute_tasks(), which processes task queues
     # on the backend application.
     self.app = webtest.TestApp(
         handlers_backend.create_application(debug=True),
         extra_environ={'REMOTE_ADDR': self.source_ip})
     # add a private key; signing depends on config.settings()
     make_private_key()
     # Remove the check for dev server in should_push_to_gs().
     self.mock(utils, 'is_local_dev_server', lambda: False)
Example #25
    def post(self, namespace, timestamp):
        digests = []
        now = utils.timestamp_to_datetime(long(timestamp))
        expiration = config.settings().default_expiration
        try:
            digests = payload_to_hashes(self)
            # Requests all the entities at once.
            futures = ndb.get_multi_async(
                model.get_entry_key(namespace, binascii.hexlify(d))
                for d in digests)

            to_save = []
            while futures:
                # Return opportunistically the first entity that can be retrieved.
                future = ndb.Future.wait_any(futures)
                futures.remove(future)
                item = future.get_result()
                if item and item.next_tag_ts < now:
                    # Update the timestamp. Add a bit of pseudo randomness.
                    item.expiration_ts, item.next_tag_ts = model.expiration_jitter(
                        now, expiration)
                    to_save.append(item)
            if to_save:
                ndb.put_multi(to_save)
            logging.info('Timestamped %d entries out of %s', len(to_save),
                         len(digests))
        except Exception as e:
            logging.error('Failed to stamp entries: %s\n%d entries', e,
                          len(digests))
            raise
Example #26
  def generate_store_url(self, entry_info, namespace, http_verb, uploaded_to_gs,
                         expiration):
    """Generates a signed URL to /content-gs/store method.

    Arguments:
      entry_info: An EntryInfo instance.
    """
    # Data that goes into request parameters and signature.
    expiration_ts = str(int(time.time() + expiration.total_seconds()))
    item_size = str(entry_info.size)
    is_isolated = str(int(entry_info.is_isolated))
    uploaded_to_gs = str(int(uploaded_to_gs))

    # Generate signature.
    sig = StoreContentHandler.generate_signature(
        config.settings().global_secret, http_verb, expiration_ts, namespace,
        entry_info.digest, item_size, is_isolated, uploaded_to_gs)

    # Bare full URL to /content-gs/store endpoint.
    url_base = self.uri_for(
        'store-gs', namespace=namespace, hash_key=entry_info.digest, _full=True)

    # Construct url with query parameters, reuse auth token.
    params = {
        'g': uploaded_to_gs,
        'i': is_isolated,
        's': item_size,
        'sig': sig,
        'token': self.request.get('token'),
        'x': expiration_ts,
    }
    return '%s?%s' % (url_base, urllib.urlencode(params))
Example #27
def render(name, params=None):
  """Shorthand to render a template."""
  out = {
    'google_analytics': config.settings().google_analytics,
  }
  out.update(params or {})
  return template.render(name, out)
Example #28
    def generate_store_url(self, entry_info, namespace, http_verb,
                           uploaded_to_gs, expiration):
        """Generates a signed URL to /content-gs/store method.

    Arguments:
      entry_info: An EntryInfo instance.
    """
        # Data that goes into request parameters and signature.
        expiration_ts = str(int(time.time() + expiration.total_seconds()))
        item_size = str(entry_info.size)
        is_isolated = str(int(entry_info.is_isolated))
        uploaded_to_gs = str(int(uploaded_to_gs))

        # Generate signature.
        sig = StoreContentHandler.generate_signature(
            config.settings().global_secret, http_verb, expiration_ts,
            namespace, entry_info.digest, item_size, is_isolated,
            uploaded_to_gs)

        # Bare full URL to /content-gs/store endpoint.
        url_base = self.uri_for('store-gs',
                                namespace=namespace,
                                hash_key=entry_info.digest,
                                _full=True)

        # Construct url with query parameters, reuse auth token.
        params = {
            'g': uploaded_to_gs,
            'i': is_isolated,
            's': item_size,
            'sig': sig,
            'token': self.request.get('token'),
            'x': expiration_ts,
        }
        return '%s?%s' % (url_base, urllib.urlencode(params))
Example #29
File: keystone.py Project: dockerian/pyapi
def get_auth_token():
    """
    Get an auth token from Keystone.
    """
    try:
        keystone = keystone_client.Client(
            username=settings('cloud_username'),
            password=settings('cloud_password'),
            tenant_name=settings('cloud_project_name'),
            auth_url=settings('cloud_auth_url'),
            insecure=True)
        return keystone.auth_ref['token']['id']
    except Exception as e:
        LOGGER.error(
            "Exception authenticating against Keystone")
        LOGGER.exception("Details: {0}".format(e))
        raise
Example #30
def entry_key_from_id(key_id):
    """Returns the ndb.Key for the key_id."""
    hash_key = key_id.rsplit('/', 1)[1]
    N = config.settings().sharding_letters
    return ndb.Key(ContentEntry,
                   key_id,
                   parent=datastore_utils.shard_key(hash_key, N,
                                                    'ContentShard'))
Example #31
    def __init__(self, *args, **kwargs):
        self.set_logger()

        self.registry = collections.deque(maxlen=1000)

        self.setup_rmq()

        if settings()['driver'] == 'chrome':
            chrome_options = Options()
            chrome_options.add_argument("--headless")
            chrome_options.add_argument('--no-sandbox')
            chrome_options.add_argument('--disable-dev-shm-usage')
            self.driver = webdriver.Chrome(ChromeDriverManager().install(),
                                           options=chrome_options)
        else:
            self.driver = webdriver.Firefox(
                executable_path=settings()['firefox_driver_path'])
Example #32
 def gs_url_signer(self):
     """On demand instance of CloudStorageURLSigner object."""
     if not self._gs_url_signer:
         settings = config.settings()
         self._gs_url_signer = gcs.URLSigner(
             settings.gs_bucket, settings.gs_client_id_email, settings.gs_private_key
         )
     return self._gs_url_signer
Example #33
File: keystone.py Project: dockerian/pyapi
def get_auth_token():
    """
    Get an auth token from Keystone.
    """
    try:
        keystone = keystone_client.Client(
            username=settings('cloud_username'),
            password=settings('cloud_password'),
            tenant_name=settings('cloud_project_name'),
            auth_url=settings('cloud_auth_url'),
            insecure=True)
        return keystone.auth_ref['token']['id']
    except Exception as e:
        error_message = "Exception authenticating against Keystone."
        logger.exception(error_message)
        # bubble the exception up - don't swallow it
        raise
Example #34
 def gs_url_signer(self):
     """On demand instance of CloudStorageURLSigner object."""
     if not self._gs_url_signer:
         settings = config.settings()
         self._gs_url_signer = gcs.URLSigner(settings.gs_bucket,
                                             settings.gs_client_id_email,
                                             settings.gs_private_key)
     return self._gs_url_signer
Example #35
 def common(self, note):
   params = {
     'cfg': config.settings(fresh=True),
     'note': note,
     'path': self.request.path,
     'xsrf_token': self.generate_xsrf_token(),
   }
   self.response.write(
       template.render('isolate/restricted_config.html', params))
Example #36
File: model.py Project: misscache/luci-py
def new_content_entry(key, **kwargs):
  """Generates a new ContentEntry for the request.

  Doesn't store it. Just creates a new ContentEntry instance.
  """
  expiration, next_tag = expiration_jitter(
      utils.utcnow(), config.settings().default_expiration)
  return ContentEntry(
      key=key, expiration_ts=expiration, next_tag_ts=next_tag, **kwargs)
Example #37
    def setUp(self):
        """Creates a new app instance for every test case."""
        super(MainTest, self).setUp()
        self.testbed.init_user_stub()

        # When called during a taskqueue, the call to get_app_version() may fail so
        # pre-fetch it.
        version = utils.get_app_version()
        self.mock(utils, 'get_task_queue_host', lambda: version)
        self.source_ip = '192.168.0.1'
        self.app_frontend = webtest.TestApp(
            handlers_frontend.create_application(debug=True),
            extra_environ={'REMOTE_ADDR': self.source_ip})
        # This is awkward but both the frontend and backend applications use the
        # same template variables.
        template.reset()
        self.app_backend = webtest.TestApp(
            handlers_backend.create_application(debug=True),
            extra_environ={'REMOTE_ADDR': self.source_ip})
        # Tasks are enqueued on the backend.
        self.app = self.app_backend

        self.auth_app = webtest.TestApp(
            auth.create_wsgi_application(debug=True),
            extra_environ={
                'REMOTE_ADDR': self.source_ip,
                'SERVER_SOFTWARE': os.environ['SERVER_SOFTWARE'],
            })

        full_access_group = config.settings().auth.full_access_group
        readonly_access_group = config.settings().auth.readonly_access_group

        auth.bootstrap_group(
            auth.ADMIN_GROUP,
            [auth.Identity(auth.IDENTITY_USER, '*****@*****.**')])
        auth.bootstrap_group(
            readonly_access_group,
            [auth.Identity(auth.IDENTITY_USER, '*****@*****.**')])
        auth.bootstrap_group(
            full_access_group,
            [auth.Identity(auth.IDENTITY_USER, '*****@*****.**')])
        # TODO(maruel): Create a BOTS_GROUP.

        self.set_as_anonymous()
Example #38
 def post(self):
     """Enumerates all GS files and delete those that do not have an associated
 ContentEntry.
 """
     gs_bucket = config.settings().gs_bucket
     logging.debug('Operating on GCS bucket: %s', gs_bucket)
     total = _incremental_delete(
         _yield_orphan_gcs_files(gs_bucket),
         lambda f: gcs.delete_file_async(gs_bucket, f, True))
     logging.info('Deleted %d lost GS files', total)
Example #39
    def enqueue(self, id):
        if not self.rmq.is_open:
            self.setup_rmq()

        self.rmq_channel.basic_publish(
            exchange='',
            routing_key=settings()['rmq_queue'],
            body=id,
            properties=pika.BasicProperties(
                delivery_mode=2,  # make message persistent
            ))
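For context, a minimal consumer counterpart to the publisher above, sketched with pika's blocking connection. The 'trends' queue name and the localhost broker are assumptions; the original scraper reads the real queue name from settings()['rmq_queue'].

import pika

# Assumed broker location and queue name; illustration only.
connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
channel = connection.channel()
channel.queue_declare(queue='trends', durable=True)

def on_message(ch, method, properties, body):
    # Acknowledge after processing so persistent messages are not lost.
    print('received id %s' % body.decode())
    ch.basic_ack(delivery_tag=method.delivery_tag)

channel.basic_consume(queue='trends', on_message_callback=on_message)
channel.start_consuming()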
Example #40
def new_content_entry(key, **kwargs):
    """Generates a new ContentEntry for the request.

  Doesn't store it. Just creates a new ContentEntry instance.
  """
    expiration, next_tag = expiration_jitter(
        utils.utcnow(),
        config.settings().default_expiration)
    return ContentEntry(key=key,
                        expiration_ts=expiration,
                        next_tag_ts=next_tag,
                        **kwargs)
Example #41
  def get(self):
    namespace = self.request.get('namespace', 'default-gzip')
    digest = self.request.get('digest', '')
    content = None

    if digest and namespace:
      try:
        raw_data, entity = model.get_content(namespace, digest)
      except ValueError:
        self.abort(400, 'Invalid key')
      except LookupError:
        self.abort(404, 'Unable to retrieve the entry')

      if not raw_data:
        stream = gcs.read_file(config.settings().gs_bucket, entity.key.id())
      else:
        stream = [raw_data]
      content = ''.join(model.expand_content(namespace, stream))

      self.response.headers['X-Frame-Options'] = 'SAMEORIGIN'
      # We delete Content-Type before storing to it to avoid having two (yes,
      # two) Content-Type headers.
      del self.response.headers['Content-Type']
      # Apparently, setting the content type to text/plain encourages the
      # browser (Chrome, at least) to sniff the mime type and display
      # things like images.  Images are autowrapped in <img> and text is
      # wrapped in <pre>.
      self.response.headers['Content-Type'] = 'text/plain; charset=utf-8'
      self.response.headers['Content-Disposition'] = str('filename=%s' % digest)
      if content.startswith('{'):
        # Try to format as JSON.
        try:
          content = json.dumps(
              json.loads(content), sort_keys=True, indent=2,
              separators=(',', ': '))
          # If we don't wrap this in html, browsers will put content in a pre
          # tag which is also styled with monospace/pre-wrap.  We can't use
          # anchor tags in <pre>, so we force it to be a <div>, which happily
          # accepts links.
          content = (
            '<div style="font-family:monospace;white-space:pre-wrap;">%s</div>'
             % content)
          # Linkify things that look like hashes
          content = re.sub(r'([0-9a-f]{40})',
            r'<a target="_blank" href="/browse?namespace=%s' % namespace +
              r'&digest=\1">\1</a>',
            content)
          self.response.headers['Content-Type'] = 'text/html; charset=utf-8'
        except ValueError:
          pass

    self.response.write(content)
Example #42
def setSettings():
    newSettings = request.json['data']
    if newSettings['ip'] == 'localhost':
        saveSettings(newSettings)
    else:
        url = 'http://' + newSettings['ip'] + ':5001/save_settings'
        r = requests.post(url, json=newSettings)
    _settings = settings()
    for i in range(len(_settings)):
        if _settings[i]['device_name'] == newSettings['device_name']:
            _settings[i] = newSettings
    saveGroupSettings(_settings)
    return jsonify(_settings)
Example #43
def is_good_content_entry(entry):
  """True if ContentEntry is not broken.

  ContentEntry is broken if it is in the old format (before content namespaces
  were sharded) or the corresponding Google Storage file doesn't exist.
  """
  # New entries use GS file path as ids. File path is always <namespace>/<hash>.
  entry_id = entry.key.id()
  if '/' not in entry_id:
    return False
  # Content is inline, entity doesn't have GS file attached -> it is fine.
  if entry.content is not None:
    return True
  # Ensure GS file exists.
  return bool(gcs.get_file_info(config.settings().gs_bucket, entry_id))
Example #44
File: utils.py Project: genegis/genegis
def loadDefaultLayer(timeout=3600):
    # this is a hack -- if the class has been updated recently, refresh the list
    layer = None
    # reload before checking the settings
    settings = config.settings()
    with open(config.log_path, 'a') as log:
        log.write("loadDefaultLayer called. Current fc_path: {}\n".format(settings.fc_path))

        if arcpy.Exists(settings.fc_path):
            # pull out the parent FGDB mod time
            fgdb_path = os.path.dirname(settings.fc_path)
            diff_in_sec = time.time() - os.path.getmtime(fgdb_path)
            log.write("  time since fc_path was modified: {}\n".format(diff_in_sec))
            if timeout is None or diff_in_sec <= timeout:
                log.write("  adding layer...\n")
                layer = addLayerFromFile(settings.fc_path) 
    return layer  
Example #45
  def post(self):
    logging.info('Deleting ContentEntry')
    incremental_delete(
        model.ContentEntry.query().iter(keys_only=True),
        ndb.delete_multi_async)

    gs_bucket = config.settings().gs_bucket
    logging.info('Deleting GS bucket %s', gs_bucket)
    incremental_delete(
        (i[0] for i in gcs.list_files(gs_bucket)),
        lambda filenames: gcs.delete_files(gs_bucket, filenames))

    logging.info('Flushing memcache')
    # High priority (.isolated files) are cached explicitly. Make sure ghosts
    # are zapped too.
    memcache.flush_all()
    logging.info('Finally done!')
Example #46
File: model.py Project: misscache/luci-py
def delete_entry_and_gs_entry(keys_to_delete):
  """Deletes synchronously a list of ContentEntry and their GS files.

  It deletes the ContentEntry first, then the files in GS. The worst case is
  that the GS files are left behind and will be reaped by a lost GS task
  queue. The reverse is much worse, having a ContentEntry pointing to a
  deleted GS entry will lead to lookup failures.
  """
  # Always delete ContentEntry first.
  ndb.delete_multi(keys_to_delete)
  # Note that some content entries may NOT have corresponding GS files. That
  # happens for small entries stored inline in the datastore or memcache. Since
  # this function operates only on keys, it can't distinguish "large" entries
  # stored in GS from "small" ones stored inline. So instead it tries to delete
  # all corresponding GS files, silently skipping ones that are not there.
  gcs.delete_files(
      config.settings().gs_bucket,
      (i.id() for i in keys_to_delete),
      ignore_missing=True)
Example #47
def get_behavior_VT(local_index):
  hash_list = local_index['File']
  print '======> VT Enrichment Begins:'
  target = len(hash_list)
  counter = 0
  for item in hash_list:
    counter = counter + 1
    portion = 100 * counter / target
    print str(portion) + ' %'
    #print settings('enrich')['vt_api_key']
    params = {"apikey": settings('enrich')['vt_api_key'], 'hash': item}
    url = 'https://www.virustotal.com/vtapi/v2/file/behaviour'

    response = requests.get(url, params=params)
    json_response = response.json()
    #print json_response

    uris = []
    try:
      ips = json_response['network']['hosts']
      ips = [x for x in ips if (IP(x).iptype() == 'PUBLIC')]
      ips = [x for x in ips if (x not in ip_whitelist)]
      for packet in json_response['network']['http']:
        parsed_uri = urlparse(packet['uri'])
        netloc = parsed_uri.netloc
        uris.append(netloc)
    except KeyError:
      ips=[]

    for uri in uris:
      if uri not in local_index['DomainName']:
        local_index['DomainName'][uri] = ['{{no_ref}}']
    #print ip_whitelist
    for ip in ips:
      if ip not in local_index['Address'] and ip not in ip_whitelist:
        local_index['Address'][ip] = ['{{no_ref}}']

    if ips or uris:
      local_index['File'][item].append(ips)
      local_index['File'][item].append(uris)

  #pprint.pprint(local_index)
  return local_index
Example #48
  def get(self, namespace, hash_key):  #pylint: disable=W0221
    logging.error('Unexpected old client')
    # Parse 'Range' header if it's present to extract initial offset.
    # Only support single continuous range from some |offset| to the end.
    offset = 0
    range_header = self.request.headers.get('range')
    if range_header:
      match = re.match(r'bytes=(\d+)-', range_header)
      if not match:
        return self.send_error(
            'Unsupported byte range.\n\'%s\'.' % range_header, http_code=416)
      offset = int(match.group(1))

    memcache_entry = memcache.get(hash_key, namespace='table_%s' % namespace)
    if memcache_entry is not None:
      self.send_data(memcache_entry, filename=hash_key, offset=offset)
      stats.add_entry(stats.RETURN, len(memcache_entry) - offset, 'memcache')
      return

    entry = model.get_entry_key(namespace, hash_key).get()
    if not entry:
      return self.send_error('Unable to retrieve the entry.', http_code=404)

    if entry.content is not None:
      self.send_data(entry.content, filename=hash_key, offset=offset)
      stats.add_entry(stats.RETURN, len(entry.content) - offset, 'inline')
      return

    # Generate signed download URL.
    settings = config.settings()
    # TODO(maruel): The GS object may not exist anymore. Handle this.
    signer = gcs.URLSigner(settings.gs_bucket,
        settings.gs_client_id_email, settings.gs_private_key)
    # The entry key is the GS filepath.
    signed_url = signer.get_download_url(entry.key.id())

    # Redirect client to this URL. If 'Range' header is used, client will
    # correctly pass it to Google Storage to fetch only subrange of file,
    # so update stats accordingly.
    self.redirect(signed_url)
    stats.add_entry(
        stats.RETURN, entry.compressed_size - offset, 'GS; %s' % entry.key.id())
Example #49
def delete_entry_and_gs_entry(keys_to_delete):
  """Deletes synchronously a list of ContentEntry and their GS files.

  For each ContentEntry, it deletes the ContentEntry first, then the files in
  GS. The worst case is that the GS files are left behind and will be reaped by
  a lost GS task queue. The reverse is much worse, having a ContentEntry
  pointing to a deleted GS entry will lead to lookup failures.
  """
  futures = {}
  exc = None
  bucket = config.settings().gs_bucket
  # Note that some content entries may NOT have corresponding GS files. That
  # happens for small entries stored inline in the datastore or memcache. Since
  # this function operates only on keys, it can't distinguish "large" entries
  # stored in GS from "small" ones stored inline. So instead it tries to delete
  # all corresponding GS files, silently skipping ones that are not there.
  for key in keys_to_delete:
    # Always delete ContentEntry first.
    futures[key.delete_async()] = key.string_id()
    # Note: this is worst case O(n²) but will scale better than that. The goal
    # is to delete files as soon as possible.
    for f in futures.keys():
      try:
        if f.done():
          # This is synchronous.
          gcs.delete_file(bucket, futures.pop(f), ignore_missing=True)
      except Exception as exc:
        break
    if exc:
      break

  while futures:
    try:
      f = ndb.Future.wait_any(futures)
      # This is synchronous.
      gcs.delete_file(bucket, futures.pop(f), ignore_missing=True)
    except Exception as exc:
      continue

  if exc:
    raise exc  # pylint: disable=raising-bad-type
Example #50
  def post(self):
    """Enumerates all GS files and delete those that do not have an associated
    ContentEntry.
    """
    gs_bucket = config.settings().gs_bucket

    def filter_missing():
      futures = {}
      cutoff = time.time() - 60*60
      for filepath, filestats in gcs.list_files(gs_bucket):
        # If the file was uploaded in the last hour, ignore it.
        if filestats.st_ctime >= cutoff:
          continue

        # This must match the logic in model.entry_key(). Since this request
        # will in practice touch every item, do not use memcache since it'll
        # mess it up by loading every item in it.
        # TODO(maruel): Batch requests to use get_multi_async() similar to
        # datastore_utils.page_queries().
        future = model.entry_key_from_id(filepath).get_async(
            use_cache=False, use_memcache=False)
        futures[future] = filepath

        if len(futures) > 20:
          future = ndb.Future.wait_any(futures)
          filepath = futures.pop(future)
          if future.get_result():
            continue
          yield filepath
      while futures:
        future = ndb.Future.wait_any(futures)
        filepath = futures.pop(future)
        if future.get_result():
          continue
        yield filepath

    gs_delete = lambda filenames: gcs.delete_files(gs_bucket, filenames)
    total = incremental_delete(filter_missing(), gs_delete)
    logging.info('Deleted %d lost GS files', total)
Example #51
 def post(self):
   # Convert MultiDict into a dict.
   params = {
     k: self.request.params.getone(k) for k in self.request.params
     if k not in ('keyid', 'xsrf_token')
   }
   cfg = config.settings(fresh=True)
   keyid = int(self.request.get('keyid', '0'))
   if cfg.key.integer_id() != keyid:
     self.common('Update conflict %s != %s' % (cfg.key.integer_id(), keyid))
     return
   params['default_expiration'] = int(params['default_expiration'])
   cfg.populate(**params)
   try:
     # Ensure key is correct, it's easy to make a mistake when creating it.
     gcs.URLSigner.load_private_key(cfg.gs_private_key)
   except Exception as exc:
     # TODO(maruel): Handling Exception is too generic. And add self.abort(400)
     self.response.write('Bad private key: %s' % exc)
     return
   cfg.store()
   self.common('Settings updated')
Example #52
import sys
from time import strftime

startTime = strftime("%A %d %B %Y %H:%M")
startTime2 = strftime("%Y_%m_%d_%Hh%M")

print " \n      ******************************************"
print "      *            Data Processing             *"
print "      ******************************************\n"


# ---------------------- SETTINGS ----------------------------------------- #
# The user has to open config.py and set the necessary variables in the
# settings method; the values are then loaded.

from config import settings
param = settings()
try:
    if (sys.argv[1] == "--script") or (sys.argv[1] == "-s"):
        param["mode"] = "script"
    elif sys.argv[1] == "--mode" or sys.argv[1] == "-m":
        from loadAndCheck import setMode
        param = setMode(param)
    else:
        param["mode"] = "graph"
except IndexError:
        param["mode"] = "graph"


# ---------------------------- LOAD DATA ---------------------------------- #
# All information on processing is collected in a dictionary :
# param["parameter_name"] = parameter_value
Example #53
 def is_enabled_callback():
   return config.settings().enable_ts_monitoring
Example #54
from stix.utils import set_id_namespace
from stix.threat_actor import ThreatActor
from stix.indicator import Indicator, RelatedIndicator
from stix.common import Confidence, InformationSource, Identity
from stix.common.related import RelatedTTP
from stix.ttp import TTP, Behavior
from stix.ttp.behavior import MalwareInstance, AttackPattern
from stix.extensions.test_mechanism.snort_test_mechanism import SnortTestMechanism

from cybox.objects.address_object import Address
from cybox.objects.domain_name_object import DomainName
from cybox.objects.file_object import File
from cybox.common import Hash
from cybox.core import Observable

verbose = settings('main')['verbose_mode']

NAMESPACE = {"https://cert.gov.uk":"certuk"}
set_id_namespace(NAMESPACE)

def get_ref_from_obs(obs, index_):
    ref = ''
    out_type = ''
    for ind_type in index_:
        try:
            ref = index_[ind_type][obs][0]
            out_type = ind_type
        except (KeyError, IndexError):
            pass
    return ref, out_type
Example #55
  def post(self, namespace, hash_key):
    entry = model.entry_key(namespace, hash_key).get()
    if not entry:
      logging.error('Failed to find entity')
      return
    if entry.is_verified:
      logging.warning('Was already verified')
      return
    if entry.content is not None:
      logging.error('Should not be called with inline content')
      return

    # Get GS file size.
    gs_bucket = config.settings().gs_bucket
    gs_file_info = gcs.get_file_info(gs_bucket, entry.key.id())

    # It's None if file is missing.
    if not gs_file_info:
      # According to the docs, GS is read-after-write consistent, so a file is
      # missing only if it wasn't stored at all or it was deleted, in any case
      # it's not a valid ContentEntry.
      self.purge_entry(entry, 'No such GS file')
      return

    # Expected stored length and actual length should match.
    if gs_file_info.size != entry.compressed_size:
      self.purge_entry(entry,
          'Bad GS file: expected size is %d, actual size is %d',
          entry.compressed_size, gs_file_info.size)
      return

    save_to_memcache = (
        entry.compressed_size <= model.MAX_MEMCACHE_ISOLATED and
        entry.is_isolated)
    expanded_size = 0
    digest = model.get_hash_algo(namespace)
    data = None

    try:
      # Start a loop where it reads the data in block.
      stream = gcs.read_file(gs_bucket, entry.key.id())
      if save_to_memcache:
        # Wraps stream with a generator that accumulates the data.
        stream = Accumulator(stream)

      for data in model.expand_content(namespace, stream):
        expanded_size += len(data)
        digest.update(data)
        # Make sure the data is GC'ed.
        del data

      # Hashes should match.
      if digest.hexdigest() != hash_key:
        self.purge_entry(entry,
            'SHA-1 do not match data (%d bytes, %d bytes expanded)',
            entry.compressed_size, expanded_size)
        return

    except gcs.NotFoundError as e:
      # Somebody deleted a file between get_file_info and read_file calls.
      self.purge_entry(entry, 'File was unexpectedly deleted')
      return
    except (gcs.ForbiddenError, gcs.AuthorizationError) as e:
      # Misconfiguration in Google Storage ACLs. Don't delete an entry, it may
      # be fine. Maybe ACL problems would be fixed before the next retry.
      logging.warning(
          'CloudStorage auth issues (%s): %s', e.__class__.__name__, e)
      # Abort so the job is retried automatically.
      return self.abort(500)
    except (gcs.FatalError, zlib.error, IOError) as e:
      # ForbiddenError and AuthorizationError inherit FatalError, so this except
      # block should be last.
      # It's broken or unreadable.
      self.purge_entry(entry,
          'Failed to read the file (%s): %s', e.__class__.__name__, e)
      return

    # Verified. Data matches the hash.
    entry.expanded_size = expanded_size
    entry.is_verified = True
    future = entry.put_async()
    logging.info(
        '%d bytes (%d bytes expanded) verified',
        entry.compressed_size, expanded_size)
    if save_to_memcache:
      model.save_in_memcache(namespace, hash_key, ''.join(stream.accumulated))
    future.wait()
Example #56
  def storage_helper(self, request, uploaded_to_gs):
    """Implement shared logic between store_inline and finalize_gs."""
    # validate token or error out
    if not request.upload_ticket:
      raise endpoints.BadRequestException(
          'Upload ticket was empty or not provided.')
    try:
      embedded = TokenSigner.validate(
          request.upload_ticket, UPLOAD_MESSAGES[uploaded_to_gs])
    except (auth.InvalidTokenError, ValueError) as error:
      raise endpoints.BadRequestException(
          'Ticket validation failed: %s' % error.message)

    # read data and convert types
    digest = embedded['d'].encode('utf-8')
    is_isolated = bool(int(embedded['i']))
    namespace = embedded['n']
    size = int(embedded['s'])

    # create a key
    key = entry_key_or_error(namespace, digest)

    # get content and compressed size
    if uploaded_to_gs:
      # ensure that file info is uploaded to GS first
      # TODO(cmassaro): address analogous TODO from handlers_api
      file_info = gcs.get_file_info(config.settings().gs_bucket, key.id())
      if not file_info:
        raise endpoints.BadRequestException(
            'File should be in Google Storage.\nFile: \'%s\' Size: %d.' % (
                key.id(), size))
      content = None
      compressed_size = file_info.size
    else:
      content = request.content
      compressed_size = len(content)

    # all is well; create an entry
    entry = model.new_content_entry(
        key=key,
        is_isolated=is_isolated,
        compressed_size=compressed_size,
        expanded_size=size,
        is_verified=not uploaded_to_gs,
        content=content,
    )

    # DB: assert that embedded content is the data sent by the request
    if not uploaded_to_gs:
      if (digest, size) != hash_content(content, namespace):
        raise endpoints.BadRequestException(
            'Embedded digest does not match provided data: '
            '(digest, size): (%r, %r); expected: %r' % (
                digest, size, hash_content(content, namespace)))
      entry.put()

    # GCS: enqueue verification task
    else:
      try:
        store_and_enqueue_verify_task(entry, utils.get_task_queue_host())
      except (
          datastore_errors.Error,
          runtime.apiproxy_errors.CancelledError,
          runtime.apiproxy_errors.DeadlineExceededError,
          runtime.apiproxy_errors.OverQuotaError,
          runtime.DeadlineExceededError,
          taskqueue.Error) as e:
        raise endpoints.InternalServerErrorException(
            'Unable to store the entity: %s.' % e.__class__.__name__)

    stats.add_entry(
        stats.STORE, entry.compressed_size,
        'GS; %s' % entry.key.id() if uploaded_to_gs else 'inline')
    return PushPing(ok=True)
Example #57
def make_private_key():
  new_key = RSA.generate(1024)
  pem_key = base64.b64encode(new_key.exportKey('PEM'))
  config.settings().gs_private_key = pem_key
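For reference, a standalone sketch of the same key-generation step using PyCryptodome outside the test harness. The key size and the base64 encoding of the PEM export mirror the test above; where the key is stored afterwards is left out.

import base64
from Crypto.PublicKey import RSA  # provided by PyCryptodome

# Generate a throwaway RSA key and base64-encode its PEM export, as the test
# above does before assigning it to config.settings().gs_private_key.
new_key = RSA.generate(1024)
pem_key = base64.b64encode(new_key.export_key('PEM'))
print(pem_key[:40])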
Example #58
import os
import sys
import webbrowser
from threading import Thread

import arcpy
import pythonaddins

# enable local imports
local_path = os.path.dirname(__file__)
for path in [local_path, os.path.join(local_path, 'toolbox')]:
    sys.path.insert(0, os.path.abspath(path))

# import local settings
import config
import utils
settings = config.settings()

# get the paths for our toolboxes
genegis_toolbox = os.path.join(local_path, "toolbox", "genegis.pyt")

#
# data management
#
class ImportData(object):
    """Implementation for genegis_import.button (Button)"""
    def __init__(self):
        self.enabled = True
        self.checked = False
    def onClick(self):
        utils.toolDialog(genegis_toolbox, "ClassifiedImport")