Example #1
def fetch_ip_list(aws_key_id, aws_secret_key, s3_bucket, ip_list_filename):
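    # Download the IP list object from the given S3 bucket and return it as a list of lines.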
    logger.debug("Fetching ip list from s3")
    s3 = boto.connect_s3(aws_access_key_id=aws_key_id,
                         aws_secret_access_key=aws_secret_key)
    bucket = s3.get_bucket(s3_bucket)
    ip_list_key = bucket.lookup(ip_list_filename)
    contents = ip_list_key.get_contents_as_string().rstrip()
    return contents.split("\n")
Example #2
def pull_threat_exchange_data(type_str, params):
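    # Query ThreatExchange for the given type and wrap each result in an ES-ready document.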
    logger.debug('Querying threat exchange for {}'.format(type_str))
    results = params['threat_class'].objects(**params['query_params'])
    docs = []
    for result in results:
        created_date = toUTC(datetime.now()).isoformat()
        es_doc = {'created_on': created_date, 'details': result}
        docs.append(es_doc)
    return docs
Example #3
    def train(self):
        with tf.Session() as session:
            logger.info('Initializing {} model...'.format(self._model_to_use))
            model = self._models[self._model_to_use]()

            logger.info('Preparing model...')
            input = tf.placeholder(tf.float32, shape=[None, 784], name='input')
            train = tf.placeholder(tf.bool)
            labels = tf.placeholder(tf.float32, shape=[None, 10])
            inference = model.infer(input, train)
            cost = model.cost(inference, labels)
            optimize = model.optimize(cost, 0.1)
            evaluate = model.evaluate(inference, labels)

            logger.info('Initializing variables...')
            session.run(tf.global_variables_initializer())

            logger.info('Initializing saver...')
            self._saver = tf.train.Saver()
            tf.summary.scalar('cost', cost)
            writer = tf.summary.FileWriter(self._tensorboard_log_dir,
                                           graph=tf.get_default_graph())
            summarize = tf.summary.merge_all()

            total_batch = int(self._mnist_data.train.num_examples /
                              self._batch_size)
            step = 0
            for epoch in range(self._epochs):
                for _ in range(total_batch):
                    batch_x, batch_y = self._mnist_data.train.next_batch(
                        self._batch_size)

                    _, cost_value, summary = session.run(
                        [optimize, cost, summarize],
                        feed_dict={
                            input: batch_x,
                            labels: batch_y,
                            train: True,
                        })

                    step += 1
                    if step % 100 == 0:
                        logger.debug(
                            'epoch: {}, step: {}, cost: {:.2f}'.format(
                                epoch, step, cost_value))
                        writer.add_summary(summary, step)

            accuracy = session.run(evaluate,
                                   feed_dict={
                                       input: self._mnist_data.test.images,
                                       labels: self._mnist_data.test.labels,
                                       train: False,
                                   })
            logger.info('The accuracy is: {:.2f}%'.format(accuracy * 100))

            self._save_model(session)
Example #4
def main():
    logger.debug('Starting')
    logger.debug(options)
    ips = fetch_ip_list(options.aws_access_key_id,
                        options.aws_secret_access_key, options.aws_bucket_name,
                        options.aws_document_key_name)
    if len(ips) < options.ips_list_threshold:
        raise LookupError('IP List contains less than ' +
                          str(options.ips_list_threshold) +
                          ' entries...something is probably up here.')
    save_ip_list(options.local_ip_list_path, ips)
Example #5
    def run(self):
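        # Poll SQS for CloudTrail notifications, then download and process each referenced S3 log file.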
        self.taskQueue.set_message_class(RawMessage)
        while True:
            try:
                records = self.taskQueue.get_messages(options.prefetch)
                for msg in records:
                    body_message = msg.get_body()
                    event = json.loads(body_message)

                    if not event['Message']:
                        logger.error(
                            'Invalid message format for cloudtrail SQS messages'
                        )
                        logger.error('Malformed Message: %r' % body_message)
                        continue

                    if event['Message'] == 'CloudTrail validation message.':
                        # We don't care about these messages
                        continue

                    message_json = json.loads(event['Message'])

                    if 's3ObjectKey' not in message_json.keys():
                        logger.error(
                            'Invalid message format, expecting an s3ObjectKey in Message'
                        )
                        logger.error('Malformed Message: %r' % body_message)
                        continue

                    s3_log_files = message_json['s3ObjectKey']
                    for log_file in s3_log_files:
                        logger.debug('Downloading and parsing ' + log_file)
                        bucket = self.s3_connection.get_bucket(
                            message_json['s3Bucket'])

                        log_file_lookup = bucket.lookup(log_file)
                        events = self.process_file(log_file_lookup)
                        for event in events:
                            self.on_message(event)

                    self.taskQueue.delete_message(msg)

            except KeyboardInterrupt:
                sys.exit(1)
            except (SSLEOFError, SSLError, socket.error):
                # Handle network errors before the generic Exception handler,
                # otherwise this branch would be unreachable.
                logger.info('Received network related error...reconnecting')
                time.sleep(5)
                self.connection, self.taskQueue = connect_sqs(
                    options.region, options.accesskey, options.secretkey,
                    options.taskexchange)
                self.taskQueue.set_message_class(RawMessage)
            except Exception as e:
                logger.exception(e)
                time.sleep(3)
Example #6
    def run(self):
        self.taskQueue.set_message_class(RawMessage)
        while True:
            try:
                records = self.taskQueue.get_messages(options.prefetch)
                for msg in records:
                    body_message = msg.get_body()
                    event = json.loads(body_message)

                    if not event['Message']:
                        logger.error(
                            'Invalid message format for cloudtrail SQS messages'
                        )
                        continue

                    if event['Message'] == 'CloudTrail validation message.':
                        # We don't care about these messages
                        continue

                    message_json = json.loads(event['Message'])

                    if 's3ObjectKey' not in message_json.keys():
                        logger.error(
                            'Invalid message format, expecting an s3ObjectKey in Message'
                        )
                        continue

                    s3_log_files = message_json['s3ObjectKey']
                    for log_file in s3_log_files:
                        logger.debug('Downloading and parsing ' + log_file)
                        bucket = self.s3_connection.get_bucket(
                            message_json['s3Bucket'])

                        log_file_lookup = bucket.lookup(log_file)
                        events = self.process_file(log_file_lookup)
                        for event in events:
                            self.on_message(event)

                    self.taskQueue.delete_message(msg)

            except KeyboardInterrupt:
                sys.exit(1)
            except ValueError as e:
                logger.error('Exception while handling message: %r' % e)
            except Exception as e:
                logger.error('Exception received: %r' % e)
                time.sleep(3)

            time.sleep(.1)
Example #7
def download_generic_alerts(repo_url, save_location, deploy_key):
    git_obj = cmd.Git(save_location)
    git_ssh_cmd = 'ssh -i %s' % deploy_key

    git_obj.update_environment(GIT_SSH_COMMAND=git_ssh_cmd)

    if not os.path.isdir(save_location):
        logger.debug("Cloning " + str(repo_url) + " into " +
                     str(save_location))
        Repo.clone_from(repo_url,
                        save_location,
                        env={'GIT_SSH_COMMAND': git_ssh_cmd})
    else:
        logger.debug("Updating " + str(save_location))
        git_obj.pull()
Example #8
    def _run(self, script, script_parameters=None):
        casa_output_file = config.OUTPUT_PATH + "/casa_output.txt"

        if not script_parameters:
            script_parameters = self._dataset_path
        self._unlock_dataset()

        if config.CASA_CONFIGS['is_parallel']:
            command = self._form_mpi_command(script, script_parameters)
        else:
            command = self._form_casa_command(script, script_parameters)

        logger.debug("Executing command -> " + command)
        proc = subprocess.Popen(command,
                                stdin=subprocess.PIPE,
                                stdout=open(casa_output_file, 'a+'),
                                stderr=subprocess.PIPE,
                                shell=True)
        proc.wait()
        return proc
Example #9
    def is_bad(self, global_median, deviation_threshold):
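        # A matrix is considered bad when its median deviates from the global median
        # or its amplitudes are too scattered.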
        matrix_median = self.median()
        matrix_sigma = self.mad_sigma()

        deviated_median = self._deviated_median(global_median,
                                                deviation_threshold,
                                                matrix_median)
        scattered_amplitude = self._scattered_amplitude(
            deviation_threshold, matrix_sigma)
        if deviated_median or scattered_amplitude:
            # logger.debug(Color.UNDERLINE + "matrix=" + str(self.amplitude_data_matrix) + Color.ENDC)
            logger.debug(Color.UNDERLINE + " median=" + str(matrix_median) +
                         ", median sigma=" + str(matrix_sigma) + ", mean=" +
                         str(self.mean()) + ", mean sigma=" +
                         str(self.mean_sigma()) + Color.ENDC)
            logger.debug(Color.WARNING + "median deviated=" +
                         str(deviated_median) + ", amplitude scattered=" +
                         str(scattered_amplitude) + Color.ENDC)
        return deviated_median or scattered_amplitude
Example #10
def verify_events(options):
    es_client = ElasticsearchClient(options.esservers)
    for required_field in options.required_fields:
        logger.debug('Looking for events without ' + required_field)
        search_query = SearchQuery(hours=12)
        search_query.add_must_not(ExistsMatch(required_field))

        # Exclude all events that are mozdef related health and stats
        search_query.add_must_not(TermMatch('_type', 'mozdefstats'))
        search_query.add_must_not(TermMatch('_type', 'mozdefhealth'))

        search_query.add_aggregation(Aggregation('_type'))
        # We don't care about the actual events, we only want the numbers
        results = search_query.execute(es_client, size=1)
        for aggreg_term in results['aggregations']['_type']['terms']:
            count = aggreg_term['count']
            category = aggreg_term['key']
            logger.error(
                "Found {0} bad events of _type '{1}' missing '{2}' field".
                format(count, category, required_field))
Example #11
    def _flag_bad_time_window(self, reason, element_id, data_set, global_sigma, global_median, scan_times, polarization,
                              scan_id, window_config):
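        # Slide a window over the scan data and flag any time range that deviates beyond the threshold.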
        bad_window_found = False
        sliding_window = Window(data_set, window_config)
        while True:
            window_matrix = sliding_window.slide()
            if window_matrix.has_sufficient_data(window_config) and window_matrix.is_bad(global_median, window_config.mad_scale_factor * global_sigma):
                bad_window_found = True
                start, end = sliding_window.current_position()
                bad_timerange = scan_times[start], scan_times[end]

                if reason == BAD_TIME:
                    self.measurement_set.flag_bad_time(self.flag_file, polarization, scan_id, bad_timerange)
                    logger.debug('Time was bad between {0}[index={1}] and {2}[index={3}]\n'.format(
                        scan_times[start], start, scan_times[end], end))

                elif reason == BAD_ANTENNA_TIME:
                    self.measurement_set.flag_bad_antenna_time(self.flag_file, polarization, scan_id, element_id, bad_timerange)
                    logger.debug('Antenna={0} was bad between {1}[index={2}] and {3}[index={4}]\n'.format(
                        element_id, scan_times[start], start, scan_times[end], end))
                else:
                    self.measurement_set.flag_bad_baseline_time(self.flag_file, polarization, scan_id, element_id, bad_timerange)
                    logger.debug('Baseline={0} was bad between {1}[index={2}] and {3}[index={4}]\n'.format(
                        element_id, scan_times[start], start, scan_times[end], end))

            if sliding_window.reached_end_of_collection():
                break
        return bad_window_found
Example #12
    def makerequest(self, query, stime, etime, maxid):
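        # Query the Papertrail search API, retrying a few times before giving up.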
        payload = {
            'min_time': calendar.timegm(stime.utctimetuple()),
            'max_time': calendar.timegm(etime.utctimetuple()),
            'q': query
        }
        if maxid is not None:
            payload['max_id'] = maxid
        hdrs = {'X-Papertrail-Token': self._apikey}

        max_retries = 3
        total_retries = 0
        while True:
            logger.debug("Sending request to papertrail API")
            resp = requests.get(self._papertrail_api,
                                headers=hdrs,
                                params=payload)
            if resp.status_code == 200:
                break
            else:
                logger.debug("Received invalid status code: {0}: {1}".format(
                    resp.status_code, resp.text))
                total_retries += 1
                if total_retries < max_retries:
                    logger.debug("Sleeping a bit then retrying")
                    time.sleep(2)
                else:
                    logger.error("Received too many error messages...exiting")
                    logger.error("Last malformed response: {0}: {1}".format(
                        resp.status_code, resp.text))
                    sys.exit(1)

        return self.parse_events(resp.json())
Example #13
def clearESCache():
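    # Clear the field data cache on non-current indexes that are not currently being searched.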
    es = esConnect(None)
    indexes = es.get_indices()
    # assumes index names like events-YYYYMMDD etc.
    # used to avoid operating on current indexes
    dtNow = datetime.utcnow()
    indexSuffix = date.strftime(dtNow, '%Y%m%d')
    previousSuffix = date.strftime(dtNow - timedelta(days=1), '%Y%m%d')
    for targetindex in sorted(indexes):
        if indexSuffix not in targetindex and previousSuffix not in targetindex:
            url = '{0}/{1}/_stats'.format(random.choice(options.esservers),
                                          targetindex)
            r = requests.get(url)
            if r.status_code == 200:
                indexstats = json.loads(r.text)
                if indexstats['_all']['total']['search']['query_current'] == 0:
                    fielddata = indexstats['_all']['total']['fielddata'][
                        'memory_size_in_bytes']
                    if fielddata > 0:
                        logger.info('target: {0}: field data {1}'.format(
                            targetindex, indexstats['_all']['total']
                            ['fielddata']['memory_size_in_bytes']))
                        clearurl = '{0}/{1}/_cache/clear'.format(
                            random.choice(options.esservers), targetindex)
                        clearRequest = requests.post(clearurl)
                        logger.info(clearRequest.text)
                        # stop at one?
                        if options.conservative:
                            return
                else:
                    logger.debug(
                        '{0}: <ignoring due to current search > field data {1}'
                        .format(
                            targetindex, indexstats['_all']['total']
                            ['fielddata']['memory_size_in_bytes']))
            else:
                logger.error('{0} returned {1}'.format(url, r.status_code))
Example #14
def save_ip_list(save_path, ips):
    ip_list_contents = '\n'.join(ips)
    logger.debug("Saving ip list")
    if os.path.isfile(save_path):
        logger.debug("Overwriting ip list file in " + str(save_path))
    else:
        logger.debug("Creating new ip list file at " + str(save_path))
    with open(save_path, "w+") as text_file:
        text_file.write(ip_list_contents)
Example #15
 def assume_role(self, role_arn, role_session_name='unknown', policy=None):
     '''Return a boto.sts.credential.Credential object given a role_arn.
     First check if a Credential object exists in the local self.credentials
     cache that is not expired. If there isn't one, assume the role of role_arn,
     store the Credential in the credentials cache and return it.'''
     logger.debug("Connecting to sts")
     if role_arn in self.credentials:
         if not self.credentials[role_arn] or not self.credentials[
                 role_arn].is_expired():
             # Return the cached value if it's False (indicating a permissions issue) or if
             # it hasn't expired.
             return self.credentials[role_arn]
     try:
         self.credentials[role_arn] = self.conn_sts.assume_role(
             role_arn=role_arn,
             role_session_name=role_session_name,
             policy=policy).credentials
         logger.debug("Assumed new role with credential %s" %
                      self.credentials[role_arn].to_dict())
     except Exception as e:
         print e
         logger.error("Unable to assume role %s due to exception %s" %
                      (role_arn, e.message))
         self.credentials[role_arn] = False
Example #16
def save_db_data(save_path, db_data):
    temp_save_path = save_path + ".tmp"
    logger.debug("Saving db data to " + temp_save_path)
    with open(temp_save_path, "wb+") as text_file:
        text_file.write(db_data)
    logger.debug("Testing temp geolite db file")
    geo_ip = GeoIP(temp_save_path)
    # Do a generic lookup to verify we don't get any errors (malformed data)
    geo_ip.lookup_ip('8.8.8.8')
    logger.debug("Moving temp file to " + save_path)
    os.rename(temp_save_path, save_path)
Example #17
def fetch_db_data(db_download_location):
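    # Download the gzipped GeoLite tarball and return the raw GeoLite2-City.mmdb contents.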
    logger.debug('Fetching db data from ' + db_download_location)
    response = urllib2.urlopen(db_download_location)
    db_raw_data = response.read()
    with tempfile.NamedTemporaryFile(mode='wb') as temp:
        logger.debug('Writing compressed gzip to temp file: ' + temp.name)
        temp.write(db_raw_data)
        temp.flush()
        logger.debug('Extracting gzip data from ' + temp.name)
        tar = tarfile.open(temp.name)
        for tarinfo in tar:
            if tarinfo.name.endswith('GeoLite2-City.mmdb'):
                extracted_file = tar.extractfile(tarinfo.name)
                return extracted_file.read()
Example #18
def main():
    logger.debug('Starting')
    logger.debug(options)
    ips = fetch_ip_list(options.aws_access_key_id,
                        options.aws_secret_access_key, options.aws_bucket_name,
                        options.aws_document_key_name)

    for manual_addition in options.manual_additions:
        if manual_addition == '':
            continue
        logger.debug("Adding manual addition: " + manual_addition)
        ips.append(manual_addition)

    if len(ips) < options.ips_list_threshold:
        raise LookupError('IP List contains less than ' +
                          str(options.ips_list_threshold) +
                          ' entries...something is probably up here.')
    save_ip_list(options.local_ip_list_path, ips)
Example #19
 def flush_s3_creds(self):
     logger.debug('Recycling credentials and reassuming role')
     self.authenticate()
     Timer(self.flush_wait_time, self.flush_s3_creds).start()
Example #20
def getQueueSizes():
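    # Gather SQS queue attributes and post a health/status document directly to Elasticsearch.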
    logger.debug('starting')
    logger.debug(options)
    es = ElasticsearchClient(options.esservers)
    sqslist = {}
    sqslist['queue_stats'] = {}
    qcount = len(options.taskexchange)
    qcounter = qcount - 1
    try:
        # meant only to talk to SQS using boto
        # and return queue attributes.

        mqConn = boto.sqs.connect_to_region(
            options.region,
            aws_access_key_id=options.accesskey,
            aws_secret_access_key=options.secretkey)

        while qcounter >= 0:
            for exchange in options.taskexchange:
                logger.debug('Looking for sqs queue stats in queue ' + exchange)
                eventTaskQueue = mqConn.get_queue(exchange)
                # get queue stats
                taskQueueStats = eventTaskQueue.get_attributes('All')
                sqslist['queue_stats'][qcounter] = taskQueueStats
                sqslist['queue_stats'][qcounter]['name'] = exchange
                qcounter -= 1
    except Exception as e:
        logger.error("Exception %r when gathering health and status " % e)

    # setup a log entry for health/status.
    sqsid = '{0}-{1}'.format(options.account, options.region)
    healthlog = dict(utctimestamp=toUTC(datetime.now()).isoformat(),
                     hostname=sqsid,
                     processid=os.getpid(),
                     processname=sys.argv[0],
                     severity='INFO',
                     summary='mozdef health/status',
                     category='mozdef',
                     source='aws-sqs',
                     tags=[],
                     details=[])
    healthlog['details'] = dict(username='******')
    healthlog['details']['queues'] = list()
    healthlog['details']['total_messages_ready'] = 0
    healthlog['details']['total_feeds'] = qcount
    healthlog['tags'] = ['mozdef', 'status', 'sqs']
    ready = 0
    qcounter = qcount - 1
    for q in sqslist['queue_stats'].keys():
        queuelist = sqslist['queue_stats'][qcounter]
        if 'ApproximateNumberOfMessages' in queuelist:
            ready1 = int(queuelist['ApproximateNumberOfMessages'])
            ready = ready1 + ready
            healthlog['details']['total_messages_ready'] = ready
        if 'ApproximateNumberOfMessages' in queuelist:
            messages = int(queuelist['ApproximateNumberOfMessages'])
        if 'ApproximateNumberOfMessagesNotVisible' in queuelist:
            inflight = int(queuelist['ApproximateNumberOfMessagesNotVisible'])
        if 'ApproximateNumberOfMessagesDelayed' in queuelist:
            delayed = int(queuelist['ApproximateNumberOfMessagesDelayed'])
        if 'name' in queuelist:
            name = queuelist['name']
        queueinfo = dict(queue=name,
                         messages_delayed=delayed,
                         messages_ready=messages,
                         messages_inflight=inflight)
        healthlog['details']['queues'].append(queueinfo)
        qcounter -= 1
    # post to elasticsearch servers directly without going through
    # message queues in case there is an availability issue
    es.save_event(index=options.index,
                  doc_type='mozdefhealth',
                  body=json.dumps(healthlog))
    # post another doc with a static docid and tag
    # for use when querying for the latest sqs status
    healthlog['tags'] = ['mozdef', 'status', 'sqs-latest']
    es.save_event(index=options.index,
                  doc_type='mozdefhealth',
                  doc_id=getDocID(sqsid),
                  body=json.dumps(healthlog))
Example #21
 def test01(self):
     '''01'''
     self.assertTrue(True)
     logger.debug('in test2.py')
Example #22
def initConfig():
    # elastic search servers
    options.esservers = list('{0}'.format(s) for s in getConfig(
        'esservers', 'http://localhost:9200', options.configfile).split(','))

    # memory watermark, set to 90 (percent) by default
    options.jvmlimit = getConfig('jvmlimit', 90, options.configfile)

    # be conservative? if set, only clear the cache for the first index found with no current searches and cached field data
    # if false, continue clearing for any index not matching the date suffix.
    options.conservative = getConfig('conservative', True, options.configfile)

    # check jvm memory first? or just clear cache
    options.checkjvmmemory = getConfig('checkjvmmemory', True,
                                       options.configfile)


if __name__ == '__main__':
    # configure ourselves
    parser = OptionParser()
    parser.add_option("-c",
                      dest='configfile',
                      default=sys.argv[0].replace('.py', '.conf'),
                      help="configuration file to use")
    (options, args) = parser.parse_args()
    initConfig()
    logger.level = logging.WARNING
    logger.debug('starting')

    main()
Example #23
def main():
    logger.debug('Connecting to Elasticsearch')
    client = ElasticsearchClient(options.esservers)
    logger.debug('Connecting to threat exchange')
    access_token(options.appid, options.appsecret)
    state = State(options.state_file_name)
    current_timestamp = toUTC(datetime.now()).isoformat()
    # We're setting a default for the past 2 days of data
    # if there isn't a state file
    since_date_obj = toUTC(datetime.now()) - timedelta(days=2)
    since_date = since_date_obj.isoformat()
    if 'lastrun' in state.data.keys():
        since_date = state.data['lastrun']

    # A master dict of all the different types of
    # data we want to pull from threat exchange
    params = {
        'malware_hash': {
            'threat_class': Malware,
            'query_params': {},
        },
        'ip_address': {
            'threat_class': ThreatDescriptor,
            'query_params': {
                'type_': 'IP_ADDRESS',
            }
        },
        'domain': {
            'threat_class': ThreatDescriptor,
            'query_params': {
                'type_': 'DOMAIN',
            }
        },
        'uri': {
            'threat_class': ThreatDescriptor,
            'query_params': {
                'type_': 'URI',
            }
        },
        'debug_string': {
            'threat_class': ThreatDescriptor,
            'query_params': {
                'type_': 'DEBUG_STRING',
            }
        },
        'banner': {
            'threat_class': ThreatDescriptor,
            'query_params': {
                'type_': 'BANNER',
            }
        },
        'email_address': {
            'threat_class': ThreatDescriptor,
            'query_params': {
                'type_': 'EMAIL_ADDRESS',
            }
        },
        'file_created': {
            'threat_class': ThreatDescriptor,
            'query_params': {
                'type_': 'FILE_CREATED',
            }
        },
    }
    docs = {}
    for param_key, param in params.iteritems():
        param['query_params']['since'] = str(since_date)
        param['query_params']['until'] = str(current_timestamp)
        param['query_params']['dict_generator'] = True
        docs = pull_threat_exchange_data(param_key, param)
        logger.debug('Saving {0} {1} to ES'.format(len(docs), param_key))
        for doc in docs:
            client.save_object(index='threat-exchange',
                               doc_type=param_key,
                               body=doc)

    state.data['lastrun'] = current_timestamp
    state.save()
Example #24
    def _mark_antennas_status(self, spw, polarization, scan_id, source_config,
                              base_antenna, r_matrix, history, antenna_count):
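        # Classify the base antenna and its baseline partners as good/doubtful/bad using the
        # angular dispersion (r value) of each baseline, recursing into doubtful antennas.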
        calib_params = CalibParams(*source_config['calib_params'])
        r_threshold = source_config['angular_dispersion']['r_threshold']
        if base_antenna in history:
            return

        visibility_data = self.measurement_set.get_data(
            spw, {
                'start': calib_params.channel,
                'width': calib_params.width
            }, polarization, {'scan_number': scan_id},
            ["antenna1", "antenna2", 'phase', 'flag'])

        baselines = self.measurement_set.baselines_for(base_antenna,
                                                       polarization, scan_id)

        baselines_count = len(baselines)
        good_baselines_threshold = int(
            (source_config['angular_dispersion']['percentage_of_good_antennas']
             * baselines_count) / 100)
        min_doubtful_antennas = int(
            (source_config['angular_dispersion']
             ['percentage_of_min_doubtful_antennas'] * baselines_count) / 100)

        for (antenna1, antenna2) in baselines:
            if not visibility_data.phase_data_present_for_baseline(
                (antenna1.id, antenna2.id)):
                baselines_count -= 1
                continue

            phase_data = visibility_data.mask_baseline_data(
                visibility_data.baseline_index((antenna1.id, antenna2.id)))

            phase_set = PhaseSet(phase_data)
            r_value = phase_set.calculate_angular_dispersion()
            if r_value == PhaseSet.INVALID_ANGULAR_DISPERSION:
                baselines_count -= 1

            another_antenna = antenna2 if base_antenna == antenna1 else antenna1
            r_matrix.add(base_antenna, another_antenna, r_value)

        doubtful_antennas = r_matrix.get_doubtful_antennas(
            base_antenna, r_threshold, min_doubtful_antennas)

        good_baselines_count = baselines_count - len(doubtful_antennas)
        if good_baselines_count >= good_baselines_threshold:
            for doubtful_antenna in doubtful_antennas:
                doubtful_antenna.update_state(polarization, scan_id,
                                              AntennaStatus.DOUBTFUL)
            base_antenna.update_state(polarization, scan_id,
                                      AntennaStatus.GOOD)
        else:
            doubtful_antennas = set()
            base_antenna.update_state(polarization, scan_id, AntennaStatus.BAD)

        history.add(base_antenna)

        if baselines_count == 0:
            logger.debug("Antenna={0} was flagged".format(base_antenna))
        else:
            logger.debug(
                "Antenna={0}, total_baselines={1}, good_baselines_count={2}, Percentage={3}"
                .format(base_antenna, baselines_count, good_baselines_count,
                        good_baselines_count * 100 / baselines_count))

        for doubtful_antenna in doubtful_antennas:
            self._mark_antennas_status(spw, polarization, scan_id,
                                       source_config, doubtful_antenna,
                                       r_matrix, history, antenna_count)
Example #25
def main():
    logger.debug('Starting')
    logger.debug(options)
    download_generic_alerts(options.alert_repo_url,
                            options.alert_data_location,
                            options.deploy_key_location)
Example #26
def main():
    logger.debug('Starting')
    logger.debug(options)
    db_data = fetch_db_data(options.db_download_location)
    save_db_data(options.db_location, db_data)
Example #27
 def _print_polarization_details(self, global_sigma, global_median, polarization, scan_id):
     logger.info(
         Color.BACKGROUD_WHITE + "Polarization =" + polarization + " Scan Id=" + str(scan_id) + Color.ENDC)
     logger.debug(
         Color.BACKGROUD_WHITE + "Ideal values = { median:" + str(global_median) + ", sigma:" + str(
             global_sigma) + " }" + Color.ENDC)
Example #28
def main():
    logger.debug('Starting')
    logger.debug(options)
    getQueueSizes()
Example #29
def main():
    logger.debug('Starting')
    logger.debug(options)
    verify_events(options)
Example #30
    def run(self):
        # Boto expects base64 encoded messages, but if the writer is not boto
        # the message is not necessarily base64 encoded.
        # Thus we have to detect that and decode or not decode accordingly
        self.taskQueue.set_message_class(RawMessage)
        while True:
            try:
                records = self.taskQueue.get_messages(options.prefetch)
                for msg in records:
                    # msg.id is the message id,
                    # get_body() should be JSON

                    # pre process the message a bit
                    tmp = msg.get_body()
                    try:
                        msgbody = json.loads(tmp)
                    except ValueError:
                        # If Boto wrote to the queue, it might be base64 encoded, so let's decode that
                        try:
                            tmp = base64.b64decode(tmp)
                            msgbody = json.loads(tmp)
                        except Exception as e:
                            logger.error(
                                'Invalid message, not JSON <dropping message and continuing>: %r'
                                % msg.get_body())
                            self.taskQueue.delete_message(msg)
                            continue

                    # If this is still not a dict,
                    # let's just drop the message and move on
                    if type(msgbody) is not dict:
                        logger.debug(
                            "Message is not a dictionary, dropping message.")
                        self.taskQueue.delete_message(msg)
                        continue

                    event = dict()
                    event = msgbody

                    # Was this message sent by fluentd-sqs
                    fluentd_sqs_specific_fields = {
                        'az', 'instance_id', '__tag'
                    }
                    if fluentd_sqs_specific_fields.issubset(set(
                            msgbody.keys())):
                        # Until we can influence fluentd-sqs to set the
                        # 'customendpoint' key before submitting to SQS, we'll
                        # need to do it here
                        # TODO : Change nubis fluentd output to include
                        # 'customendpoint'
                        event['customendpoint'] = True

                    if 'tags' in event:
                        event['tags'].extend([options.taskexchange])
                    else:
                        event['tags'] = [options.taskexchange]

                    # process message
                    self.on_message(event, msg)

                    # delete message from queue
                    self.taskQueue.delete_message(msg)
                time.sleep(.1)

            except ValueError as e:
                logger.exception('Exception while handling message: %r' % e)
                self.taskQueue.delete_message(msg)
            except (SSLEOFError, SSLError, socket.error):
                logger.info('Received network related error...reconnecting')
                time.sleep(5)
                self.connection, self.taskQueue = connect_sqs(
                    options.region, options.accesskey, options.secretkey,
                    options.taskexchange)
                self.taskQueue.set_message_class(RawMessage)