Example #1
 def entry_to_log_event(entry):
     time = entry['_SOURCE_REALTIME_TIMESTAMP'].timestamp()
     hw_id = "" if snowflake.snowflake() is None else snowflake.snowflake()
     int_map = {'exit_status': entry['EXIT_STATUS']}
     normal_map = {'unit': entry['UNIT'],
                   'exit_code': entry["EXIT_CODE"]}
     return LogEntry(category=SERVICE_EXIT_CATEGORY,
                     time=int(time),
                     hw_id=hw_id,
                     normal_map=normal_map,
                     int_map=int_map)
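A quick usage sketch: the journald field names come straight from the function above, while the entry values below are hypothetical and only illustrate the expected shape (the timestamp field must be an object with a .timestamp() method).

from datetime import datetime

# Hypothetical journald entry; the values are made up for illustration.
entry = {
    '_SOURCE_REALTIME_TIMESTAMP': datetime(2021, 1, 1, 12, 0, 0),
    'UNIT': 'mme.service',
    'EXIT_CODE': 'exited',
    'EXIT_STATUS': 1,
}
event = entry_to_log_event(entry)  # LogEntry with int time, hw_id tag and exit maps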
Example #2
    async def _checkin(self, service_statusmeta):
        """
        If the previous checkin was successful, create a new channel
        (to make sure the channel doesn't become stale). Otherwise,
        keep the existing channel.
        """
        if self._checkin_client is None:
            chan = ServiceRegistry.get_rpc_channel(
                    'checkind', ServiceRegistry.CLOUD)
            self._checkin_client = CheckindStub(chan)

        mconfig = self._service.mconfig

        request = CheckinRequest(
            gateway_id=snowflake.snowflake(),
            system_status=self._system_status(),
            platform_info=self._platform_info(),
            machine_info=self._machine_info(),
        )
        logging.debug('Checkin request:\n%s', request)

        for statusmeta in service_statusmeta.values():
            request.status.meta.update(statusmeta)

        try:
            await grpc_async_wrapper(
                self._checkin_client.Checkin.future(
                    request, mconfig.checkin_timeout,
                ),
                self._loop)
            self._checkin_done()
        except grpc.RpcError as err:
            self._checkin_error(err)
Example #3
    def sync(self, service_name):
        """
        Synchronizes sample queue for specific service to cloud and reschedules
        sync loop
        """
        if service_name in self._samples_for_service and \
           self._samples_for_service[service_name]:
            chan = ServiceRegistry.get_rpc_channel(
                'metricsd',
                ServiceRegistry.CLOUD,
                grpc_options=self._grpc_options)
            client = MetricsControllerStub(chan)
            if self.post_processing_fn:
                # If the service wants to, let it run a postprocessing function.
                # If we throw an exception here, we'll have no idea whether
                # something was postprocessed or not, so try to keep it
                # idempotent.  #m sevchicken
                self.post_processing_fn(
                    self._samples_for_service[service_name])

            samples = self._samples_for_service[service_name]
            sample_chunks = self._chunk_samples(samples)
            for idx, chunk in enumerate(sample_chunks):
                metrics_container = MetricsContainer(
                    gatewayId=snowflake.snowflake(), family=chunk)
                future = client.Collect.future(metrics_container,
                                               self.grpc_timeout)
                future.add_done_callback(
                    self._make_sync_done_func(service_name, idx))
            self._samples_for_service[service_name].clear()
        self._loop.call_later(self.sync_interval, self.sync, service_name)
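The _chunk_samples helper is not shown in this example. A minimal sketch of what it plausibly does, written here as a free function with a made-up chunk size: split the sample queue into fixed-size batches so each MetricsContainer stays below the gRPC message size limit.

def _chunk_samples(samples, chunk_size=100):
    # Sketch only: chunk_size is a hypothetical value, not the real default.
    for start in range(0, len(samples), chunk_size):
        yield samples[start:start + chunk_size]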
Example #4
def sentry_init(service_name: str,
                sentry_mconfig: mconfigs_pb2.SharedSentryConfig) -> None:
    """Initialize connection and start piping errors to sentry.io."""

    sentry_status = get_sentry_status(service_name)
    if sentry_status == SentryStatus.DISABLED:
        return

    dsn_python, sample_rate = get_sentry_dsn_and_sample_rate(sentry_mconfig)

    if not dsn_python:
        logging.info(
            'Sentry disabled because of missing dsn_python. '
            'See documentation (Configure > AGW) on how to configure '
            'Sentry dsn.', )
        return

    sentry_sdk.init(
        dsn=dsn_python,
        release=os.getenv(COMMIT_HASH),
        traces_sample_rate=sample_rate,
        before_send=_ignore_if_not_marked
        if sentry_status == SentryStatus.SEND_SELECTED_ERRORS else None,
    )

    cloud_address = get_service_config_value(
        CONTROL_PROXY,
        CLOUD_ADDRESS,
        default=None,
    )
    sentry_sdk.set_tag(ORC8R_CLOUD_ADDRESS, cloud_address)
    sentry_sdk.set_tag(HWID, snowflake.snowflake())
    sentry_sdk.set_tag(SERVICE_NAME, service_name)
Example #5
def register_update(eapi):
    """Ensures the inbound URL for the BTS is up to date."""
    vpn_ip = system_utilities.get_vpn_ip()
    vpn_status = "up" if vpn_ip else "down"

    # This could fail when offline! Must handle connection exceptions.
    try:
        params = {
            'bts_uuid': snowflake.snowflake(),
            'vpn_status': vpn_status,
            'vpn_ip': vpn_ip,
            # federer always runs on port 80, but didn't in old versions
            'federer_port': "80",
        }
        r = requests.get(conf['registry'] + "/bts/register",
                         params=params,
                         headers=eapi.auth_header,
                         timeout=11)
        if r.status_code == 200:
            try:
                d = json.loads(r.text)
                if 'bts_secret' in d:
                    conf['bts_secret'] = d['bts_secret']
            except ValueError:
                pass
            return r.text
        else:
            raise ValueError("BTS registration update failed with status"
                             " %d (%s)" % (r.status_code, r.text))
    except (requests.exceptions.ConnectionError, requests.exceptions.Timeout):
        logger.error("register_update failed due to connection error or"
                     " timeout.")
Example #6
def main():
    parser = argparse.ArgumentParser(
        description='Show the UUID and base64 encoded DER public key', )
    parser.add_argument(
        "--pub_key",
        type=str,
        default="/var/opt/magma/certs/gw_challenge.key",
    )
    opts = parser.parse_args()

    public_key = load_public_key_to_base64der(opts.pub_key)
    msg = textwrap.dedent("""
        Hardware ID
        -----------
        {}

        Challenge key
        -------------
        {}

        Notes
        -----
        - Hardware ID is this gateway's unique identifier
        - Challenge key is this gateway's long-term keypair used for
          bootstrapping a secure connection to the cloud
        """)
    print(msg.format(snowflake.snowflake(), public_key.decode('utf-8')))
Example #7
 def GetGatewayId(self, _, context):
     """
     Get gateway hardware ID
     """
     return magmad_pb2.GetGatewayIdResponse(
         gateway_id=snowflake.snowflake(),
     )
Example #8
 def _get_checkin_request_as_state(self) -> State:
     request = self._checkin_manager.get_latest_checkin_request()
     value = MessageToJson(request)
     state = State(type="checkin_request",
                   deviceID=snowflake.snowflake(),
                   value=value.encode('utf-8'))
     return state
Example #9
 def sync(self):
     """
     Synchronizes sample queue to cloud and reschedules sync loop
     """
     if self._samples:
         chan = ServiceRegistry.get_rpc_channel('metricsd',
                                                ServiceRegistry.CLOUD)
         client = MetricsControllerStub(chan)
         if self.post_processing_fn:
             # If the service wants to, let it run a postprocessing function.
             # If we throw an exception here, we'll have no idea whether
             # something was postprocessed or not, so try to keep it
             # idempotent.  #m sevchicken
             self.post_processing_fn(self._samples)
         samples = self._retry_queue + self._samples
         metrics_container = MetricsContainer(
             gatewayId=snowflake.snowflake(), family=samples)
         future = client.Collect.future(metrics_container,
                                        self.grpc_timeout)
         future.add_done_callback(
             lambda future: self._loop.call_soon_threadsafe(
                 self.sync_done, samples, future))
         self._retry_queue.clear()
         self._samples.clear()
     self._loop.call_later(self.sync_interval, self.sync)
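sync_done itself is not shown. A speculative sketch of what it might do, written as a free function over an explicit retry queue: on RPC failure the batch is parked so the next sync pass resends it instead of dropping the samples.

import logging

def sync_done(retry_queue, samples, future):
    # Sketch (assumption): park failed batches for the next sync pass.
    err = future.exception()
    if err is not None:
        logging.error('Metrics sync failed: %s', err)
        retry_queue.extend(samples)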
Example #10
def sentry_init(service_name: str, sentry_mconfig: mconfigs_pb2.SharedSentryConfig) -> None:
    """Initialize connection and start piping errors to sentry.io."""

    sentry_config = _get_shared_sentry_config(sentry_mconfig)

    if not sentry_config.dsn:
        logging.info(
            'Sentry disabled because of missing dsn_python. '
            'See documentation (Configure > AGW) on how to configure '
            'Sentry dsn.',
        )
        return

    sentry_sdk.init(
        dsn=sentry_config.dsn,
        release=os.getenv(COMMIT_HASH),
        traces_sample_rate=sentry_config.sample_rate,
        before_send=_get_before_send_hook(sentry_config.exclusion_patterns),
    )

    cloud_address = get_service_config_value(
        CONTROL_PROXY,
        CLOUD_ADDRESS,
        default=None,
    )
    sentry_sdk.set_tag(ORC8R_CLOUD_ADDRESS, cloud_address)
    sentry_sdk.set_tag(HWID, snowflake.snowflake())
    sentry_sdk.set_tag(SERVICE_NAME, service_name)
Example #11
    def register_subscriber(self, imsi):
        """Send a request to the registry server with this BTS unique ID and
        the number.

        Raises: ValueError if the API failed to register the user
                400 - Bad parameters
                403 - User is not associated with this BTS
                404 - No numbers available
                409 - IMSI already registered to another network
                500 - Uh-oh
        """
        url = self.conf['registry'] + "/register/"
        try:
            r = requests.post(url,
                              headers=self.auth_header,
                              data={
                                  'imsi': imsi,
                                  'bts_uuid': snowflake.snowflake()
                              })
        except BaseException as e:  # log and rethrow
            logger.error("Endaga: Register network error: %s." % e)
            raise

        if r.status_code != 200:
            raise ValueError(r.text)

        return json.loads(r.text)
Example #12
def register_handler(client: RegistrationStub, args: List[str]) -> RegisterResponse:
    """
    Register a device and retrieve its control proxy.
    Args:
        client: Registration stub
        args: command line arguments
    Returns:
        RegisterRequest: register request, used for printing after function returns
        RegisterResponse: response from gRPC call, either error or the control_proxy
    """
    req = RegisterRequest(
        token=args.token,
        hwid=AccessGatewayID(
            id=snowflake.snowflake(),
        ),
        challenge_key=ChallengeKey(
            key=load_public_key_to_base64der("/var/opt/magma/certs/gw_challenge.key"),
            key_type=ChallengeKey.KeyType.SOFTWARE_ECDSA_SHA256,
        ),
    )

    res = client.Register(req)
    if res.HasField("error"):
        raise Exception(res.error)

    return req, res
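A usage sketch wiring register_handler into a small CLI. The channel address and stub wiring here are assumptions; note that despite the List[str] annotation, the function reads args.token, so an argparse Namespace is what it really expects.

import argparse
import grpc

def main():
    parser = argparse.ArgumentParser(description='Register this gateway')
    parser.add_argument('token', type=str, help='registration token')
    args = parser.parse_args()

    channel = grpc.insecure_channel('localhost:50081')  # hypothetical address
    req, res = register_handler(RegistrationStub(channel), args)
    print(req)
    print(res)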
Example #13
    def _package_and_send_metrics(
            self, metrics: List[metrics_pb2.MetricFamily],
            target: ScrapeTarget,
    ) -> None:
        """
        Send parsed and protobuf-converted metrics to cloud.
        """
        chan = ServiceRegistry.get_rpc_channel(
            'metricsd',
            ServiceRegistry.CLOUD,
            grpc_options=self._grpc_options,
        )

        client = MetricsControllerStub(chan)
        for chunk in self._chunk_samples(metrics):
            metrics_container = MetricsContainer(
                gatewayId=snowflake.snowflake(),
                family=chunk,
            )
            future = client.Collect.future(
                metrics_container,
                self.grpc_timeout,
            )
            future.add_done_callback(
                lambda future:
                self._loop.call_soon_threadsafe(
                    self.scrape_done, future, target,
                ),
            )

        self._loop.call_later(
            target.interval,
            self.scrape_prometheus_target, target,
        )
Example #14
 def _get_gw_state(self) -> Optional[State]:
     gw_state = self._checkin_manager.get_latest_gw_state()
     if gw_state is not None:
         state = State(type="gw_state",
                       deviceID=snowflake.snowflake(),
                       value=gw_state.encode('utf-8'))
         return state
     return None
Example #15
def updated_stored_mconfig():
    log_event(
        Event(
            stream_name="magmad",
            event_type="updated_stored_mconfig",
            tag=snowflake.snowflake(),
            value="{}",
        ))
Example #16
def disconnected_sync_rpc_stream():
    log_event(
        Event(
            stream_name="magmad",
            event_type="disconnected_sync_rpc_stream",
            tag=snowflake.snowflake(),
            value="{}"
        )
    )
Example #17
 def POST(self):
     data = web.input()
     # The auth token should be here.
     logger.info('GET mock subscriber provision')
     auth_token = web.ctx.env['HTTP_AUTHORIZATION']
     assert data.bts_uuid == snowflake.snowflake()
     assert len(data.imsi) == 19
     assert auth_token == "Token %s" % CONF['endaga_token']
     return json.dumps({ 'number': '1%010d' % random.randint(1000000000, 9999999999) })
Example #18
def established_sync_rpc_stream():
    log_event(
        Event(
            stream_name="magmad",
            event_type="established_sync_rpc_stream",
            tag=snowflake.snowflake(),
            value="{}"
        )
    )
Example #19
def _get_snowflake():
    """ Read UUID from /etc/snowflake. If it doesn't exist, die. """
    bts_uuid = snowflake()
    if bts_uuid:
        return bts_uuid

    SNOWFLAKE_MISSING = '/etc/snowflake missing'
    logger.critical(SNOWFLAKE_MISSING)
    raise SystemExit(SNOWFLAKE_MISSING)
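The docstring above also hints at what the snowflake module itself does. A minimal sketch under that assumption (the real module may differ):

def snowflake(snowflake_file='/etc/snowflake'):
    # Sketch (assumption): return the persistent UUID stored in /etc/snowflake,
    # or None if the file is missing or empty.
    try:
        with open(snowflake_file, 'r') as f:
            uuid = f.read().strip()
    except IOError:
        return None
    return uuid or None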
Example #20
def restarted_services(services):
    # Convert to a list for JSON serializability
    services = list(services)
    log_event(
        Event(
            stream_name="magmad",
            event_type="restarted_services",
            tag=snowflake.snowflake(),
            value=json.dumps(RestartedServices(services=services).to_dict()),
        ))
Example #21
def makeSnow(flakes, size, stroke, t):
    snowCount = 0
    noFlake = 0
    t.pensize(stroke)
    while (snowCount < flakes):
        nextCoord = utils.randomCoord()
        if (allowFlake(nextCoord, size)):
            noFlake = 0
            snowflakes.append(nextCoord)
            utils.moveCursor(nextCoord[0], nextCoord[1], t)
            t.pencolor(utils.colorPicker())
            snowflake.snowflake(size, t)
            snowCount += 1
            if (snowCount % 3 == 0):
                eraseFlake(snowflakes[0], size, t)
        else:
            noFlake += 1
            if (noFlake > 5):
                eraseFlake(snowflakes[0], size, t)
Example #22
def getLocalHostDetails():
    while (1):
        try:
            hostid = snowflake.snowflake()
            ipAddr = socket.gethostbyname(socket.gethostname()).strip()
            totalCpus = multiprocessing.cpu_count() * 3
            return (hostid, ipAddr, totalCpus)
        except:
            print(str(sys.exc_info()))
            time.sleep(1)
Example #23
 def make_scoped_device_id(id, scope):
     """
     Create a deviceID of the format <id> for scope 'network'.
     Otherwise, create a key of the format <hwid>:<id> for 'gateway' or any
     unrecognized scope.
     """
     if scope == "network":
         return id
     else:
         return snowflake.snowflake() + ":" + id
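Usage sketch with hypothetical IDs: network-scoped IDs pass through unchanged, everything else is prefixed with this gateway's hardware ID.

make_scoped_device_id('policy_rules', 'network')  # -> 'policy_rules'
make_scoped_device_id('gw_state', 'gateway')      # -> '<hwid>:gw_state'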
Example #24
def getLocalHostDetails():
  while(1):
    try:
      hostid = snowflake.snowflake()
      ipAddr = socket.gethostbyname(socket.gethostname()).strip()
      totalCpus = multiprocessing.cpu_count() * 3
      return(hostid,ipAddr,totalCpus)
    except:
      print(str(sys.exc_info()))
      time.sleep(1)
Example #25
def processed_updates(updates):
    # Convert updates to dicts for JSON serializability
    dict_updates = [MessageToDict(u) for u in updates]
    log_event(
        Event(
            stream_name="magmad",
            event_type="processed_updates",
            tag=snowflake.snowflake(),
            value=json.dumps(ProcessedUpdates(updates=dict_updates).to_dict()),
        ))
Example #26
def _randomize_sync_interval(interval: int) -> int:
    """_randomize_sync_interval increases sync interval by random amount.

    Increased sync interval ameliorates the thundering herd effect at Orc8r.
    "Random" increase is deterministic based on the gateway's HWID.
    """
    h = hashlib.md5()
    h.update(bytes(snowflake.snowflake(), 'utf8'))  # digest of hwid
    multiplier = (hash(h.hexdigest()) % 100) / 100  # to interval [0, 1]
    delta = multiplier * (interval / 5)  # up to 1/5 of target interval
    return int(interval + delta)
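One caveat worth noting: Python's built-in hash() of a str is salted per process, so the multiplier above is only stable for the lifetime of the process. A sketch of a variant (an assumption, not the project's code) whose offset stays fixed for a given HWID across restarts, using the md5 digest directly:

import hashlib

def randomize_sync_interval_stable(hwid: str, interval: int) -> int:
    digest = hashlib.md5(hwid.encode('utf8')).hexdigest()
    multiplier = (int(digest, 16) % 100) / 100  # map digest to [0, 1)
    return int(interval + multiplier * (interval / 5))  # up to 1/5 of interval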
Example #27
def main():
    parser = argparse.ArgumentParser(
        description='Show the UUID and base64 encoded DER public key')

    parser.add_argument("--pub_key", type=str,
                        default="/var/opt/magma/certs/gw_challenge.key")
    opts = parser.parse_args()

    public_key = load_public_key_to_base64der(opts.pub_key)
    print("Hardware ID:\n------------\n%s\n" % snowflake.snowflake())
    print("Challenge Key:\n-----------\n%s" % public_key.decode('utf-8'))
Example #28
    def _get_gw_state(self) -> Optional[State]:
        gw_type = "gw_state"
        gw_state, has_all_required_fields = \
            self._gw_status_factory.get_serialized_status()
        if has_all_required_fields:
            self._error_handler.num_skipped_gateway_states = 0
            return self._make_state(gw_type, snowflake.snowflake(), gw_state)

        # check if we have failed to send states too many times in a row
        if 0 < self._error_handler.max_skipped_gw_states < \
                self._error_handler.num_skipped_gateway_states:
            logging.warning(
                "Number of skipped checkins exceeds %d "
                "(cfg: max_skipped_checkins). Checking in anyway.",
                self._error_handler.max_skipped_gw_states)
            # intentionally don't reset num_skipped_gateway_states here
            return self._make_state(gw_type, snowflake.snowflake(), gw_state)

        # skipping reporting gateway state
        self._error_handler.num_skipped_gateway_states += 1
        return None
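_make_state is not shown here; a speculative sketch, mirroring the State construction in Examples #8 and #14 above (whether the value still needs encoding depends on what get_serialized_status returns):

def _make_state(state_type, device_id, serialized_value):
    # Sketch (assumption): wrap the pieces into the State protobuf.
    return State(type=state_type,
                 deviceID=device_id,
                 value=serialized_value.encode('utf-8'))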
Example #29
def processed_updates(configs_by_service):
    # Convert to dicts for JSON serializability
    configs = {}
    for srv, config in configs_by_service.items():
        configs[srv] = MessageToDict(config)
    log_event(
        Event(
            stream_name="magmad",
            event_type="processed_updates",
            tag=snowflake.snowflake(),
            value=json.dumps(configs),
        ))
Example #30
 def process_stream_updates(self, client, stream_name, callback):
     extra_args = self._get_extra_args_any(callback, stream_name)
     request = StreamRequest(gatewayId=snowflake.snowflake(),
                             stream_name=stream_name,
                             extra_args=extra_args)
     for update_batch in client.GetUpdates(request,
                                           timeout=self._stream_timeout):
         self._loop.call_soon_threadsafe(
             callback.process_update,
             stream_name,
             update_batch.updates,
             update_batch.resync,
         )
Example #31
    def generate_redirect_log_entry(self, redirect_info):
        time = int(datetime.datetime.now().timestamp())
        hw_id = snowflake.snowflake()
        int_map = {'server_response': redirect_info.server_response.http_code}
        normal_map = {
            'subscriber_ip': redirect_info.subscriber_ip,
            'redirect_address': redirect_info.server_response.redirect_address
        }

        return LogEntry(category=self.LOGGING_CATEGORY,
                        time=int(time),
                        hw_id=hw_id,
                        normal_map=normal_map,
                        int_map=int_map)
Example #32
File: vote.py Project: sjmf/votebox
def check_config(c):
    if 'uuid' not in c:
        c['uuid'] = snowflake.snowflake()
        log.info("My UUID is {}".format(c['uuid']))
        write_config(c)

    if 'key' not in c:
        log.info("Requesting API key from service at {}".format(SERVICE_URL))
        response = requests.get(SERVICE_URL + 'key', params={'uuid': c['uuid']})

        if response.status_code == 200:
            log.info("Got API key, storing it in {}".format(CONFIG_FILE))
            response = json.loads(response.text)
            c['key'] = response['key']
            write_config(c)
        else:
            log.critical((
                    "Could not request API key (status {}) "+
                    "and it is not in the config file."
                ).format(response.status_code))
            error_state("Failed to get an API key. Cannot continue, but will keep flashing fail LED.")

    log.info("My configuration is: {}".format(json.dumps(c)))
    return c
Example #33
def checkin(server=conf["server"], data=None):
    # Use None as the default to avoid mutating a shared default dict across calls.
    data = data if data is not None else {}
    private_ips = str(get_private_ips())
    data["private_ip"] = private_ips
    url = server + "/checkin/%s" % snowflake.snowflake()
    print(url)
    return requests.get(url, params=data)
Example #34
import os
import snowflake
import subprocess
import time
import sys

dirSelf = os.path.dirname(os.path.realpath(__file__))
# The lib directory sits next to this script's theBox directory.
libDir = os.path.join(os.path.dirname(dirSelf), "lib")
sys.path.append(libDir)

import constants


hostname = "google.com" #example
headers = {}
headers['user-agent'] = "theBox-v1.0"
boxid = snowflake.snowflake()
serverHost = "http://"+ constants.backendServer +"/ALIVE"
timeToWait = 30
timeInformed = 0

def getPublicIP():
  # Ask OpenDNS for this host's public IP via dig; return 0 if nothing came back.
  p = subprocess.Popen(['dig', '+short', 'myip.opendns.com', '@resolver1.opendns.com'],
                       stdout=subprocess.PIPE)
  out, _ = p.communicate()
  p.wait()
  if out:
    return out
  return 0