Example #1
def main(
    user_email,
    url_api_collection,
    log_handler=None,
    mail_handler=None,
    dir_profile="profiles",
    profile_path=None,
    config_file=None,
):
    """Executes a harvest with given parameters.
    Returns the ingest_doc_id, directory harvest saved to and number of
    records.
    """
    if not config_file:
        config_file = os.environ.get("DPLA_CONFIG_FILE", "akara.ini")
    num_recs = -1
    my_mail_handler = None
    if not mail_handler:
        my_mail_handler = logbook.MailHandler(EMAIL_RETURN_ADDRESS, user_email, level="ERROR", bubble=True)
        my_mail_handler.push_application()
        mail_handler = my_mail_handler
    try:
        collection = Collection(url_api_collection)
    except Exception as e:
        msg = "Exception in Collection {}, init {}".format(url_api_collection, str(e))
        logbook.error(msg)
        raise e
Example #2
    def install_issue_defaults(self, issue):
        """Assign default issue labels, milestone and assignee, if any."""

        defs = self.settings.NEW_ISSUE_DEFAULTS

        patch = {}

        milestone_title = defs.get('milestone')
        if milestone_title:
            milestone = self.github.Milestones.get_or_create_in_repository(
                self.repo_user, self.repo_name, milestone_title)
            patch['milestone'] = milestone.number

        if defs.get('labels') is not None:
            patch['labels'] = map(unicode, defs['labels'])

        if defs.get('assignee') is not None:
            patch['assignee'] = defs['assignee']

        if len(patch):
            if not self.dry_run:
                try:
                    issue.patch(**patch)
                except:
                    logbook.error(
                        u"Unable to change issue %s with attributes %r" %
                        (issue, patch))
                    raise
            logbook.info(u"Installed defaults %r for issue %s." %
                         (patch, issue))
Example #3
def wait(num_retries=60, retry_sleep_seconds=1):
    import sqlalchemy

    from flask_app.app import create_app
    app = create_app()

    uri = app.config['SQLALCHEMY_DATABASE_URI']
    for retry in xrange(num_retries):
        logbook.info(
            "Testing database connection... (retry {0}/{1})", retry + 1, num_retries)
        if retry > 0:
            time.sleep(retry_sleep_seconds)
        try:
            sqlalchemy.create_engine(uri).connect()
        except sqlalchemy.exc.OperationalError as e:
            if 'does not exist' in str(e):
                break
            logbook.error(
                "Ignoring OperationError {0} (db still not availalbe?)", e)
        except Exception as e:
            logbook.error(
                "Could not connect to database ({0.__class__}: {0}. Going to retry...", e, exc_info=True)
        else:
            break
    else:
        raise RuntimeError("Could not connect to database")
    logbook.info("Database connection successful")
Example #4
def slash_run(args, report_stream=None, resume=False, app_callback=None, test_sort_key=None):
    if report_stream is None:
        report_stream = sys.stderr
    with _get_slash_app_context(args, report_stream, resume) as app:
        if app_callback is not None:
            app_callback(app)
        try:
            with handling_exceptions():
                if resume:
                    session_ids = app.args.positionals
                    if not session_ids:
                        session_ids = [get_last_resumeable_session_id()]
                    to_resume = [x for session_id in session_ids for x in get_tests_to_resume(session_id)]
                    collected = app.test_loader.get_runnables(to_resume, sort_key=test_sort_key)
                else:
                    collected = _collect_tests(app, args, test_sort_key=test_sort_key)
                if app.args.interactive:
                    collected = itertools.chain([generate_interactive_test()], collected)
            with app.session.get_started_context():
                run_tests(collected)

        except SlashException as e:
            logbook.error(str(e))
            return -1
        finally:
            save_resume_state(app.session.results)

        if app.session.results.is_success(allow_skips=True):
            return 0
        return -1
Example #5
    def aggregate(self, report_id):
        logbook.info("Get customer usage aggregation for {}", report_id)
        customer = Customer.get_by_id(report_id.customer_id)
        if not customer:
            raise Exception("Customer %s not found" % report_id.customer_id)

        with timed("get_usage simple"):
            aggregated_usage = ServiceUsage.get_usage(customer, report_id.start, report_id.end)

        tariffs = {}
        services = set()
        for usage in aggregated_usage:
            service_id, tariff_id, cost, usage_volume = usage
            services.add(service_id)
            if not tariff_id:
                logbook.error("ServiceUsage {} is not completed. Tariff is not filled", usage)
                continue
            tariff = Tariff.get_by_id(tariff_id)
            tariff_report = tariffs.get(tariff_id)
            if tariff_report is None:
                tariff_report = self.tariff_report_type(tariff, customer)
                tariffs[tariff_id] = tariff_report

            tariff_report.add_usage(usage)

        total = Counter()
        for tariff_id, tariff in tariffs.items():
            total_tariff, currency = tariff.aggregate()
            total[currency] += total_tariff

        for t, value in total.items():
            total[t] = decimal_to_string(value)

        logbook.info("Aggregated {} for {}. Services: {}", total, customer, services)
        return self.prepare_result(list(tariffs.values()), total, customer, report_id.start, report_id.end)
Example #6
def register_file():
    beam_id = request.json['beam_id']
    beam = db.session.query(Beam).filter_by(id=beam_id).first()
    if not beam:
        logbook.error('Transporter attempted to post to unknown beam id {}', beam_id)
        abort(http.client.BAD_REQUEST)

    if beam.pending_deletion or beam.deleted:
        abort(http.client.FORBIDDEN)

    file_name = request.json['file_name']
    f = db.session.query(File).filter_by(beam_id=beam_id, file_name=file_name).first()
    if not f:
        logbook.info("Got upload request for a new file: {} @ {}", file_name, beam_id)
        f = File(beam_id=beam_id, file_name=file_name, size=None, status="pending")
        db.session.add(f)
        db.session.commit()
    else:
        logbook.info("Got upload request for a existing file: {} @ {} ({})", file_name, beam_id, f.status)

    if not f.storage_name:
        f.storage_name = "{}/{}-{}".format(
            _assure_beam_dir(beam.id), f.id, f.file_name.replace("/", "__").replace("\\", "__"))
        db.session.commit()

    if not beam.combadge_contacted:
        beam.combadge_contacted = True
        db.session.commit()

    return jsonify({'file_id': str(f.id), 'should_beam': f.status != 'uploaded', 'storage_name': f.storage_name})
Example #7
    def stat(self):
        result = Counter()
        flavors = openstack.get_nova_flavors()
        id2flavor = {flavor.id: flavor for flavor in flavors}

        for tenant_id in Customer.active_tenants():
            servers = openstack.get_nova_servers(tenant_id=tenant_id)
            for server in servers:
                if server.status != 'ACTIVE':
                    continue
                server_flavor_id = server.flavor['id']
                server_flavor = id2flavor.get(server_flavor_id)
                if server_flavor:
                    server_flavor_name = server_flavor.name
                    result['flavor.%s.vcpus' % server_flavor_name] += server_flavor.vcpus
                    result['flavor.%s.ram' % server_flavor_name] += server_flavor.ram
                else:
                    logbook.error("Server {} in tenant {} has unknown flavor: {}", server, tenant_id, server.flavor)

            # Check total resource usage for each tenant
            limits = openstack.get_nova_limits(tenant_id=tenant_id)
            for k, v in limits.items():
                if v > 0 and k in ['totalCoresUsed', 'totalRAMUsed']:
                    result[k] += v

        return result
Example #8
    def decorator(*args, **kwargs):
        if config['DEBUG']:
            # signature not needed in DEBUG mode, continue
            debug('[auth] debug mode => skipping signature validation')
            return f(*args, **kwargs)

        try:
            payload = request.get_json()
            cur_time = int(time.time())

            # allow for latency of max 2000ms
            for timestamp in xrange(cur_time, cur_time - 3, -1):

                # the order of updates is important
                m = hashlib.sha512()
                m.update(payload['key'])
                m.update(config['app_secret'])
                m.update(str(timestamp))

                if m.hexdigest() == payload['sig']:
                    info('[auth] => valid signature, continue')
                    return f(*args, **kwargs)

        except:
            error('[auth] => could not parse / invalid sig')

            pass

        # we only get here if the sig is invalid or required params were missing
        abort(401)
Example #9
    def patch(self, http=None, **kwargs):
        """Remotely alter a previously requested `RemoteObject` through an HTTP ``PATCH`` request.

        """

        location = getattr(self, 'url', None) or getattr(
            self, '_location', None)
        if location is None:
            raise ValueError('Cannot save %r with no URL to PATCH to' % self)

        body = json.dumps(kwargs)

        headers = {}
        if hasattr(self, '_etag') and self._etag is not None:
            headers['if-match'] = self._etag
        headers['content-type'] = self.content_types[0]

        request = self.get_request(url=location,
                                   method='PATCH',
                                   body=body,
                                   headers=headers)
        if http is None:
            http = httplib2.Http()
        response, content = http.request(**request)

        # print body, response, content

        try:
            self.update_from_response(location, response, content)
        except:
            logbook.error(u"Failed patch request: %r" % request)
            raise
Example #10
def main(args):
    signal.signal(signal.SIGHUP, politwoops.utils.restart_process)

    log_handler = politwoops.utils.configure_log_handler(
        _script_, args.loglevel, args.output)
    with logbook.NullHandler():
        with log_handler.applicationbound():
            try:
                log.info("Starting Politwoops worker...")
                log.notice("Log level {0}".format(log_handler.level_name))
                if args.images:
                    log.notice("Screenshot support enabled.")

                with politwoops.utils.Heart() as heart:
                    politwoops.utils.start_watchdog_thread(heart)
                    app = DeletedTweetsWorker(heart, args.images)
                    if args.restart:
                        return politwoops.utils.run_with_restart(app.run)
                    else:
                        try:
                            return app.run()
                        except Exception as e:
                            logbook.error(
                                "Unhandled exception of type {exctype}: {exception}",
                                exctype=type(e),
                                exception=str(e))
                            if not args.restart:
                                raise

            except KeyboardInterrupt:
                log.notice("Killed by CTRL-C")
Example #11
    def response_process(self, response, work_time):
        if response is None:
            logbook.error("Response can't be None")
            response = body_to_log = "{}"
            status = 200

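        # bottle.Response objects pass through unchanged; only a loggable body representation is extracted from them.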
        elif isinstance(response, bottle.Response):
            if response.content_type.startswith("text/") or response.content_type == "application/json":
                body_to_log = str(response.body) or getattr(response, "message", "")
            else:
                body_to_log = response.content_type
            status = response.status

        elif isinstance(response, dict):
            status = 200

            try:
                body_to_log = json.dumps(response, cls=DateTimeJSONEncoder)
                if self.debug:
                    response = json.dumps(response, cls=DateTimeJSONEncoder, indent=4)
                else:
                    response = body_to_log
            except TypeError:
                logbook.exception("Can't encode reply: {}", response)
                raise bottle.HTTPError(body="Internal Server Error")

            bottle.response.content_type = 'application/json'
        else:
            logbook.error("Incorrect response ({}): {}", type(response), response)
            body_to_log = str(response)
            status = 200
        self.log_response(body_to_log, status, work_time)
        return response
Example #13
    def install_issue_defaults(self, issue):
        """Assign default issue labels, milestone and assignee, if any."""

        defs = self.settings.NEW_ISSUE_DEFAULTS

        patch = {}

        milestone_title = defs.get('milestone')
        if milestone_title:
            milestone = self.github.Milestones.get_or_create_in_repository(self.repo_user, self.repo_name, milestone_title)
            patch['milestone'] = milestone.number

        if defs.get('labels') is not None:
            patch['labels'] = map(unicode, defs['labels'])

        if defs.get('assignee') is not None:
            patch['assignee'] = defs['assignee']

        if len(patch):
            if not self.dry_run:
                try:
                    issue.patch(**patch)
                except:
                    logbook.error(u"Unable to change issue %s with attributes %r" % (issue, patch))
                    raise
            logbook.info(u"Installed defaults %r for issue %s." % (patch, issue))
Example #14
def analysis(doc):
    '''
    analyze the user info:
        birth position
        birth time
        male or female
        mail
    '''
    # analyze the ID card
    # get the useful info
    card = doc['card']
    if len(card) == 18:
        position_num = [card[:2], card[:4], card[:6]]
        year, month = card[6:10], card[10:12]
        gender = card[16]
    elif len(card) == 15:
        position_num = [card[:2], card[:4], card[:6]]
        year, month = card[6:8], card[8:10]
        gender = card[14]
    # convert to useful info
    try:
        position_code = [int('{:0<6}'.format(item)) for item in position_num]
        year, month = int(year), int(month)
        gender = int(gender) % 2
    except StandardError as error_info:
        logbook.error('error:{}, card:{}'.format(error_info, card))
        return
Example #15
def main(user_email,
         url_api_collection,
         log_handler=None,
         mail_handler=None,
         dir_profile='profiles',
         profile_path=None,
         config_file=None,
         redis_host=None,
         redis_port=None,
         redis_pswd=None,
         redis_timeout=600,
         rq_queue=None,
         run_image_harvest=False):
    '''Runs a UCLDC ingest process for the given collection'''
    cleanup_work_dir()  # remove files from /tmp
    emails = [user_email]
    if EMAIL_SYS_ADMIN:
        emails.extend([u for u in EMAIL_SYS_ADMIN.split(',')])
    if not mail_handler:
        mail_handler = logbook.MailHandler(
            EMAIL_RETURN_ADDRESS, emails, level='ERROR', bubble=True)
    mail_handler.push_application()
    if not config_file:
        config_file = os.environ.get('DPLA_CONFIG_FILE', 'akara.ini')
    if not (redis_host and redis_port and redis_pswd):
        config = config_harvest(config_file=config_file)

    try:
        collection = Collection(url_api_collection)
    except Exception as e:
        msg = 'Exception in Collection {}, init {}'.format(url_api_collection,
                                                           str(e))
        logbook.error(msg)
        raise e
Example #16
def notify_managers_about_new_service_in_tariff(self, customer_id,
                                                flavor_name):
    customer = Customer.get_by_id(customer_id)
    if not customer:
        logbook.error("Customer {} not found in notify manager", customer_id)
        return
    logbook.info("notify manager about adding new service to tariff {}",
                 customer.tariff.name)
    from api.admin.user import UserApi
    service_edit_url = urljoin(
        request_base_url(),
        posixpath.join(UserApi.ADMIN_FRONTEND_PATH, "tariffs",
                       str(customer.tariff.tariff_id), "services"))
    customer_url = urljoin(
        request_base_url(),
        posixpath.join(UserApi.ADMIN_FRONTEND_PATH, "index",
                       str(customer.customer_id), "info"))

    subject, body = MessageTemplate.get_rendered_message(
        MessageTemplate.NEW_SERVICE_IN_TARIFF,
        language=preferred_language(),
        account=customer.email,
        flavor_name=flavor_name,
        tariff=customer.tariff.name,
        customer_url=customer_url,
        service_edit_url=service_edit_url)

    logbook.info("Sending email notification to delete data of {}",
                 customer.email)
    send_email.delay(get_customers_manager(customer), subject, body)
Example #17
File: main.py Project: deti/boss
        def inner(*args, **kwargs):
            try:
                if new_session and not conf.test:
                    db.session.close()
                logbook.debug("Start task {} with args: {} {}", fn.__name__, args, kwargs)

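                # Derive a short request id from the task arguments; unhashable args fall back to hashing their pretty-printed form.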
                try:
                    h = "%8x" % abs(hash(args))
                except TypeError:
                    from pprint import pformat
                    h = hash(pformat(args))
                    h = "%8x" % abs(h)
                request_id = "%s-%s" % (fn.__name__, h[0:4])

                def inject_request_id(record):
                    record.extra['request_id'] = request_id

                with logbook.Processor(inject_request_id):
                    res = fn(*args, **kwargs)
                if auto_commit:
                    db.session.commit()
                logbook.debug("Result of task {}: {}", fn.__name__, res)
                return res
            except OperationalError as operation_error:
                logbook.warning("Database is down {}: {}", conf.database.uri, operation_error, exc_info=True)
                logbook.error("Database is down {}: {}", conf.database.uri, operation_error)
                db.session.close()
                current_task.retry(exc=operation_error, countdown=calc_exp_countdown())
            except Exception as exc:
                logbook.warning("{} failed. Retrying...", fn.__name__, exc_info=True)
                current_task.retry(exc=exc, countdown=calc_exp_countdown())
Example #19
def analysis(doc):
    '''
    analyze the user info:
        birth position
        birth time
        male or female
        mail
    '''
    # analyze the ID card
    # get the useful info
    card = doc['card']
    if len(card) == 18:
        position_num = [card[:2], card[:4], card[:6]]
        year, month = card[6:10], card[10:12]
        gender = card[16]
    elif len(card) == 15:
        position_num = [card[:2], card[:4], card[:6]]
        year, month = card[6:8], card[8:10]
        gender = card[14]
    # convert to useful info
    try:
        position_code = [int('{:0<6}'.format(item))
                         for item in position_num]
        year, month = int(year), int(month)
        gender = int(gender) % 2
    except StandardError as error_info:
        logbook.error('error:{}, card:{}'.format(error_info, card))
        return
Example #20
        def wrapper(*args, **kwargs):
            start_time = time.time()
            request_id = self.generate_id()

            def inject_request_id(record):
                record.extra['request_id'] = request_id

            with logbook.Processor(inject_request_id):
                logbook.notice(self.request_str(), extra={"api": True})

                try:
                    response = callback(*args, **kwargs)
                except OperationalError as e:
                    logbook.warning("Database is down {}: {}", conf.database.uri, e, exc_info=True)
                    logbook.error("Database is down {}: {}", conf.database.uri, e)
                    response = errors.DatabaseIsDown()
                except errors.BadRequest as e:
                    e.format_response()
                    response = e
                except bottle.HTTPResponse as e:
                    response = e
                except Exception as e:
                    if self.under_test:
                        import traceback
                        traceback.print_exc()
                    logbook.exception("Exception during processing request: %s %s" %
                                      (bottle.request.method, bottle.request.path))
                    self.log_response(str(e), 500, time.time() - start_time)
                    raise
                finally:
                    from model import db
                    db.session.remove()
                response = self.response_process(response, time.time() - start_time)

            return response
Example #21
File: test.py Project: lhon/aldy
def test_single(sample, location, expected, profile, threshold, gene):
	expected = [r for r in expected if not pd.isnull(r)]
	message = '{} - {}'.format(sample, ' or '.join(expected))
	message = colorize('{:30}'.format(message), 'teal')
	if '' not in expected:
		expected = [[str(x).strip() for x in re.split('[/\+]', r)] for r in expected]
		expected = set(tuple(sorted(r, key=sortkey)) for r in expected)
	else:
		expected = set()

	try:
		solutions = genotype.genotype(location, gene, profile, threshold)
	except:
		logbook.error('{} {}', message, colorize('CRASH ', 'red'))
		exit(1)

	def fix(s):
		return re.split('(\d+)', s)[1]
	orig_solutions = solutions
	solutions = set(tuple(sorted((fix(p) for p in s), key=sortkey)) for s in solutions)
	expected = set(tuple(sorted((fix(p) for p in s), key=sortkey)) for s in expected)

	if solutions == expected:
		logbook.info('{} {} {}', message, colorize('OK   ', 'green'), list(orig_solutions))
		return 1
	elif solutions <= expected and len(solutions) != 0:
		logbook.info('{} {} {}', message, colorize('OK<  ', 'green'), list(orig_solutions))
		return 2
	elif len(expected & solutions) > 0:
		logbook.warn('{} {} {}', message, colorize('MULTI', 'yellow'), list(orig_solutions))
		return 3
	else:
		logbook.error('{} {} {}', message, colorize('FAIL ', 'red'), list(orig_solutions))
		return 0
Example #22
    def patch(self, http=None, **kwargs):
        """Remotely alter a previously requested `RemoteObject` through an HTTP ``PATCH`` request.

        """

        location = getattr(self, 'url', None) or getattr(self, '_location', None)
        if location is None:
            raise ValueError('Cannot save %r with no URL to PATCH to' % self)

        body = json.dumps(kwargs)

        headers = {}
        if hasattr(self, '_etag') and self._etag is not None:
            headers['if-match'] = self._etag
        headers['content-type'] = self.content_types[0]

        request = self.get_request(url=location, method='PATCH', body=body, headers=headers)
        if http is None:
            http = httplib2.Http()
        response, content = http.request(**request)

        # print body, response, content

        try:
            self.update_from_response(location, response, content)
        except:
            logbook.error(u"Failed patch request: %r" % request)
            raise
Example #23
def subprocess_run(command, abort, term, log=True, **kwargs):
    if log:
        logbook.info(command)
    p = Popen(
            args=command,
            stdout=PIPE,
            stderr=STDOUT,
            **kwargs
            )
    outputs = []
    for line in iter(p.stdout.readline, ''):
        if term and term.is_set():
            term = None # Don't spam calls to terminate
            p.terminate()
            logbook.info('Caught TERM signal: stopping')
        line = line.strip()
        outputs.append(line)
        if log:
            logbook.info(line)
    returncode = p.wait()
    if returncode == 0:
        return outputs
    else:
        if abort:
            abort.set()
        exc = Exception(command, outputs, returncode)
        logbook.error(exc)
Example #24
    def _get_localized(cls, template_id, data_type, data_value, language):
        value = data_value.get(language)
        if value:
            return value

        logbook.error("{} from template {} not found for language {}", data_type, template_id, language)
        return data_value[conf.ui.default_language]
Example #25
    def make_action(cls, state, now=None):
        from model import Customer
        logbook.info("Try apply action: {}", state)

        now = now or utcnow().datetime

        machine = cls.machines[state.name]
        customer = Customer.get_by_id(state.customer_id)
        try:
            new_state_name = getattr(machine, state.action)(customer)
        except Exception as e:
            logbook.error("action {} failed: {}", state, e)
            state.remove()
            db.session.commit()
            raise

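        # Advance one step in the schedule; a state name returned by the action handler overrides the schedule order below.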
        state.step += 1
        if not new_state_name:
            if state.step >= len(machine.schedule):
                logbook.info("Actions {} are completed for {}", cls.name, customer)
                state.remove()
                db.session.commit()
                return

            new_state = machine.schedule[state.step]
        else:
            new_state = find_first(machine.schedule, lambda x: x[0] == new_state_name)
            if not new_state:
                state.remove()
                db.session.commit()
                raise Exception("Can't find new state %s for machine %s" % (new_state_name, cls.name))
        state.action = new_state[0]
        state.scheduled_at = now + timedelta(seconds=new_state[1])
        logbook.info("New action {} is scheduled", state)
Example #26
    def collect_channels(self):
        """retrieve the list of the top 99 streams and simply mark them as interesting streams to watch.

        What twitch returns is a list of streams associated with a channel. The difference is that a stream
        is one live session while the channel contains them. We will retrieve only channel information.
        
        """
        res = self._get("/streams?limit=99")
        inserts = []
        ok = True
        if len(res['streams']) == 0:
            logbook.error("list of streams is empty, aborting")
            ok = False
        for stream in res['streams']:
            channel = stream['channel']
            d = dict(
                _id = channel['_id'],
                name = channel['name'],
                display_name = channel['display_name'],
                url = channel['url'],
                logo = channel['logo'],
                created_at = channel['created_at'],
            )
            stream['date'] = datetime.datetime.now()
            stream['sid'] = stream['_id']
            stream['_id'] = unicode(uuid.uuid4())
            self.db.channels.save(d)            # this is the unique list of channels
            del stream['channel']['_links']    # remove links to make data more compact
            inserts.append(stream)
        if ok:
            logbook.debug("saving %s streams" %len(inserts))
            self.db.streams.insert(inserts)      # this is the snapshot of all streams every hour
Example #27
    def passthough(self, method, url):
        assert(method in ('GET', 'POST'))
        if self.entry_point is None:
            raise NotFound()
        url = posixpath.join(self.entry_point, url)
        request_params = '; '.join([k + '=' + (','.join(request.params.getall(k)))
                                   for k in request.params.keys()])

        logbook.debug("[graphite] Request: {} with {}.", url, request_params)
        request.body.seek(0)
        body = request.body.read()
        logbook.debug("Request query: {}", request.query_string)
        logbook.debug("Request body: {}", body)

        headers = dict(request.headers)
        if method == "GET":
            headers.pop("Content-Length", None)
        logbook.debug("Request headers: {}", headers)
        try:
            resp = requests.request(method, url, params=request.query_string, data=body,
                                    stream=True, headers=headers)
        except RequestException as err:
            logbook.error("[graphite] Request exception: {}. Url: {}. Params: {}",
                          err, url, request_params)
            raise ServiceUnavailable(str(err))

        headers = {k: v for (k, v) in resp.headers.items()
                   if k.lower() not in self.hopbyhop_headers}
        return HTTPResponse(resp.raw.data, status=resp.status_code, headers=headers)
Example #28
def main(args):
    signal.signal(signal.SIGHUP, politwoops.utils.restart_process)

    log_handler = politwoops.utils.configure_log_handler(_script_, args.loglevel, args.output)
    with logbook.NullHandler():
        with log_handler.applicationbound():
            try:
                log.info("Starting feed checker...")
                log.notice(u"Log level {0}".format(log_handler.level_name))

                with politwoops.utils.Heart() as heart:
                    politwoops.utils.start_watchdog_thread(heart)
                    app = FeedsChecker(heart)
                    if args.restart:
                        return politwoops.utils.run_with_restart(app.run)
                    else:
                        try:
                            return app.run()
                        except Exception as e:
                            logbook.error("Unhandled exception of type {exctype}: {exception}",
                                          exctype=type(e),
                                          exception=str(e))
                            if not args.restart:
                                raise

            except KeyboardInterrupt:
                log.notice(u"Killed by CTRL-C")
Example #29
 def unpack_aggregated(packed):
     try:
         return msgpack.unpackb(packed,
                                object_hook=object_hook_datetime,
                                encoding='utf-8')
     except Exception as e:
         logbook.error("Invalid binary report: {}".format(e))
         return None
Example #30
def test_single(sample, location, expected, profile, gene, solver):
    expected = [r for r in expected if not pd.isnull(r)]

    message = '{}::{}::{}'.format(sample, solver[:2], gene)
    expected = [[
        str(x).strip() for x in re.split(r'[/\+]', r) if str(x).strip() != ''
    ] for r in expected]
    expected = [tuple(sorted(r, key=sortkey)) for r in expected]

    def fix(s):
        return re.split(r'(\d+)', s)[1]

    expected = [
        tuple(sorted((fix(p) for p in s), key=sortkey)) for s in expected
    ]

    expected, expected_new = set(expected), set(expected[1:])

    solutions = genotype.genotype(
        location,
        'tmp/{}_{}_{}.out'.format(sample, gene, profile),
        'tmp/{}_{}_{}.log'.format(sample, gene, profile),
        gene,
        profile,
        0.5,
        solver,
        cn_solution=None,
        reference='/data/cb/inumanag/aldy/cram-genome.fa',
        cn_neutral_region=None)

    orig_solutions = '; '.join(','.join(s[1]) for s in solutions)
    orig_expected = '; '.join(','.join(s) for s in expected)
    solutions = set(
        tuple(sorted((fix(p) for p in s[1]), key=sortkey)) for s in solutions)

    if solutions == expected:
        logbook.warn('{:20} {} {:25} == {}', message,
                     colorize('OK   ', 'green'), orig_solutions, orig_expected)
        return 1
    elif solutions <= expected and len(solutions) != 0:
        if solutions == expected_new:
            logbook.warn('{:20} {} {:25} == {}', message,
                         colorize('OK=  ', 'green'), orig_solutions,
                         orig_expected)
        else:
            logbook.warn('{:20} {} {:25} <= {}', message,
                         colorize('OK?  ', 'green'), orig_solutions,
                         orig_expected)
        return 2
    elif len(expected & solutions) > 0:
        logbook.warn('{:20} {} {:25} =~ {}', message,
                     colorize('MULT ', 'yellow'), orig_solutions,
                     orig_expected)
        return 3
    else:
        logbook.error('{:20} {} {:25} != {}', message,
                      colorize('FAIL ', 'red'), orig_solutions, orig_expected)
        return 0
Example #31
def doWork():
    session = Session()
    for run in scrapeRuns():
        try:
            session.merge(run)
            session.commit()
        except IntegrityError:
            error("Duplicate Run: " + str(run))
            session.rollback()
Example #32
 def get_volume_type_list(self):
     if time.time() < self.cache_updated_at + self.UPDATE_INTERVAL:
         # avoid refreshing the cache too often
         return
     self.volume_types_cache = {vtype.id: vtype.name for vtype in openstack.client_cinder.volume_types.list()}
     if not self.volume_types_cache:
         logbook.error("Volume type list if empty")
     logbook.info("Got list of volume types: {}", self.volume_types_cache)
     self.cache_updated_at = time.time()
Example #33
    def stop_instance(self, server, tenant):
        server_name = server.name
        server_id = server.id
        tenant_id = tenant.id
        tenant_name = tenant.name

        start = arrow.utcnow()

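        # Poll the instance until it can be stopped, resolving intermediate states (paused, suspended, rescued, resized) along the way.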
        while (arrow.utcnow() - start).seconds < conf.openstack.server_state.limit:
            server = self.client_nova.servers.get(server_id)

            task_state = server._info['OS-EXT-STS:task_state']
            server_state = server._info['OS-EXT-STS:vm_state']

            log.debug("instance '{}' (id: {}) of tenant '{}' (id: {}) is in {} vm_state and in {} task_state",
                      server_name, server_id, tenant_name, tenant_id, server_state, task_state)

            if task_state is None:
                if VM_STATE.is_running(server_state):
                    break
                elif server_state == VM_STATE.PAUSED:
                    self.client_nova.servers.unpause(server_id)
                    log.info("Unpausing instance '{}' (id: {}) of tenant '{}' (id: {}) before stopping it", server_name,
                             server_id, tenant_name, tenant_id)
                elif server_state == VM_STATE.SUSPENDED:
                    self.client_nova.servers.resume(server_id)
                    log.info("Resuming instance '{}' (id: {}) of tenant '{}' (id: {}) before stopping it", server_name,
                             server_id, tenant_name, tenant_id)
                elif server_state == VM_STATE.RESCUED:
                    self.client_nova.servers.unrescue(server_id)
                    log.info("Unrescuing instance '{}' (id: {}) of tenant '{}' (id: {}) before stopping it",
                             server_name, server_id, tenant_name, tenant_id)
                elif server_state == VM_STATE.RESIZED:
                    self.client_nova.servers.confirm_resize(server_id)
                    log.info("Confirming resized instance '{}' (id: {}) of tenant '{}' (id: {}) before stopping it",
                             server_name, server_id, tenant_name, tenant_id)
                elif server_state in \
                        (VM_STATE.STOPPED, VM_STATE.SHELVED, VM_STATE.SHELVED_OFFLOADED, VM_STATE.SOFT_DELETED,
                         VM_STATE.DELETED, VM_STATE.ERROR):
                    log.info("instance '{}' (id: {}) of tenant '{}' (id: {}) is in the '{}' state and won't be stopped",
                             server_name, server_id, tenant_name, tenant_id, server_state)
                    return

            else:
                log.info("Waiting until instance '{}' (id: {}) of tenant '{}' (id: {}) gets to the active state",
                         server_name, server_id, tenant_name, tenant_id)
                time.sleep(conf.openstack.server_state.check)

        else:
            log.error("Time limit for getting instance '{}' (id: {}) of tenant '{}' (id: {}) to the active state " \
                      "exceeded", server_name, server_id, tenant_name, tenant_id)
            return

        log.info("Stopping instance '{}' (id: {}) of tenant '{}' (id: {})", server_name, server_id, tenant_name,
                 tenant_id)

        self.client_nova.servers.stop(server_id)
Example #35
    def define_quadrants(self):
        """ Determine which quadrants (in AACGM coordinates) the OCB pole
        and data vector lie in

        Requires
        ---------
        self.ocb_aacgm_mlt : (float)
            OCB pole MLT in AACGM coordinates in hours
        self.aacgm_mlt : (float)
            Vector AACGM MLT in hours
        self.pole_angle : (float)
            vector angle in poles-vector triangle in degrees

        Updates
        --------
        self.ocb_quad : (int)
            OCB pole quadrant
        self.vec_quad : (int)
            Vector quadrant

        Notes
        ------
        North (N) and East (E) are defined by the AACGM directions centred on
        the data vector location, assuming vertical is positive downwards
        Quadrants: 1 [N, E]; 2 [N, W]; 3 [S, W]; 4 [S, E]
        """

        # Test input
        assert(not np.isnan(self.ocb_aacgm_mlt)), \
            logging.error("OCB pole location required")
        assert(not np.isnan(self.aacgm_mlt)), \
            logging.error("Vector AACGM location required")
        assert(not np.isnan(self.pole_angle)), \
            logging.error("vector angle in poles-vector triangle required")

        # Determine where the OCB pole is relative to the data vector
        ocb_adj_mlt = self.ocb_aacgm_mlt - self.aacgm_mlt
        while ocb_adj_mlt < 0.0:
            ocb_adj_mlt += 24.0
        if abs(ocb_adj_mlt) >= 24.0:
            ocb_adj_mlt -= 24.0 * np.sign(ocb_adj_mlt)

        if self.pole_angle < 90.0:
            # OCB pole lies in quadrant 1 or 2
            self.ocb_quad = 1 if ocb_adj_mlt < 12.0 else 2
        else:
            # OCB poles lies in quadrant 3 or 4
            self.ocb_quad = 3 if ocb_adj_mlt < 24.0 else 4

        # Now determine which quadrant the vector is pointed into
        if self.aacgm_n >= 0.0:
            self.vec_quad = 1 if self.aacgm_e >= 0.0 else 2
        else:
            self.vec_quad = 4 if self.aacgm_e >= 0.0 else 3

        return
Example #36
 def cleanup(self, mailbox=None, created_after=None, search=None, prefix=None):
     try:
         messages = self.get_messages(mailbox, created_after, search, prefix)
     except Exception as e:
         logbook.error("Failed to cleanup mailtrap: {}", e)
         return
     logbook.debug("Deleting {} messages by filter {}".format(len(messages), search))
     for message in messages:
         logbook.debug("Mailtrap deleting: {}", self.short_message_str(message))
         self._delete(posixpath.join(self._mailbox_url(mailbox), str(message["id"])))
Example #37
 def handle_rcpt(self, server, line, ctx):
     match = _RECIPIENT_REGEX.match(line)
     if not match:
         logbook.error("Invalid recipient line received: {!r}", line)
         server.send(_SYNTAX_ERROR)
         return
     recipient = match.groups()[0]
     logbook.debug('Will send to {!r}', recipient)
     ctx.recipients.append(recipient)
     server.send(_OK)
Example #39
def validate_config(config):
    required_keys = ('gcloud_project', )

    result = True
    for key in required_keys:
        if not config.get(key):
            logbook.error('Environment variable {} is required', key.upper())
            result = False

    return result
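A minimal usage sketch for the validator above (the GCLOUD_PROJECT environment variable and the SystemExit handling are illustrative assumptions, not part of the original project):
import os

# Hypothetical wiring: each required key is read from the matching upper-cased
# environment variable, as the error message in validate_config() suggests.
config = {'gcloud_project': os.environ.get('GCLOUD_PROJECT')}

if not validate_config(config):
    # validate_config() has already logged which variables are missing
    raise SystemExit(1)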
Example #40
    def update_from_response(self, url, response, content):
        try:
            r = super(RemoteObject, self).update_from_response(url, response, content)
        except:
            logbook.error(u"Received error response: %r, %s" % (response, content))
            raise

        self._rate_limit = (response.get('x-ratelimit-remaining'), response.get('x-ratelimit-limit'))

        return r
Example #41
def notify_managers_about_hdd(self, customer_id):
    customer = Customer.get_by_id(customer_id)
    if not customer:
        logbook.error("Customer {} not found in notify manager", customer_id)
        return
    logbook.info("notify manager about hdd for removing for customer {}", customer)
    from model.account.customer_history import CustomerHistory
    block_event = CustomerHistory.get_last_block_event(customer)
    send_email_hdd_notification(get_customers_manager(customer),
                                block_event.date,
                                customer.email)
Example #42
    def update_period(self, frequency, cron_data=None, now=None):
        cron_frequency = self.cron_frequency(frequency, cron_data=cron_data)
        now = now or utcnow().datetime
        try:
            cron = croniter(cron_frequency, start_time=now)
        except ValueError as e:
            logbook.error("Invalid frequency format {}: {}", cron_frequency, e)
            raise

        self.frequency = cron_frequency
        self.next_scheduled = cron.get_next(datetime)
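For reference, a minimal sketch of the croniter calls that update_period() relies on (the cron expression and dates are arbitrary examples):
from datetime import datetime

from croniter import croniter

# next run of "daily at 03:00", counted from the given start time
cron = croniter("0 3 * * *", start_time=datetime(2024, 1, 1))
next_run = cron.get_next(datetime)  # datetime(2024, 1, 1, 3, 0)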
Example #43
        def wrapper(*args, **kwargs):
            if not request.json:
                abort(http.client.BAD_REQUEST)

            try:
                validator.validate(request.json)
            except Exception as e:
                logbook.error(e)
                abort(http.client.BAD_REQUEST)

            return f(*args, **kwargs)
Example #44
 def send(self, metrics, timestamp=None, prefix=None):
     for name, value in metrics.items():
         if isinstance(value, (int, float)):
             metric_name = self.join((self.base_prefix, self.prefix, prefix, name))
             self.client.send(metric_name, value, timestamp)
         elif isinstance(value, dict):
             self.send(value, timestamp, prefix=self.join((prefix, name)))
         elif value is None:
             pass
         else:
             logbook.error("Invalid value type for metric {}: {} {}", name, type(value), value)
Example #46
 def get_ceilometer_samples(self, q, limit):
     """ Queries ceilometer for all the entries"""
     try:
         result = openstack.client_ceilometer.new_samples.list(q=q, limit=limit)
         log.debug("[get_ceilometer_samples] query:{}, limit: {}. Number records: {}",
                   q, limit, len(result))
         return result
     except ClientException as e:
         log.error("[get_ceilometer_samples] Can't fetch usage info for query: {}, limit: {}. Error: {}",
                   q, limit, e)
         raise
Example #47
def notify_managers_about_hdd(self, customer_id):
    customer = Customer.get_by_id(customer_id)
    if not customer:
        logbook.error("Customer {} not found in notify manager", customer_id)
        return
    logbook.info("notify manager about hdd for removing for customer {}",
                 customer)
    from model.account.customer_history import CustomerHistory
    block_event = CustomerHistory.get_last_block_event(customer)
    send_email_hdd_notification(get_customers_manager(customer),
                                block_event.date, customer.email)
Example #48
 def __str__(self):
     # noinspection PyUnresolvedReferences
     try:
         fields = self.__table__.columns.keys()
         columns = ", ".join(
             "%s=%s" % (k, self.__dict__.get(k, "<Unknown field %s>" % k))
             for k in fields)
         return "<%s %s>" % (self.__class__.__name__, columns)
     except Exception as e:
         logbook.error("__str__ failed for {}: {}", type(self), e)
         return str(type(self))
Example #49
    def calculate_usage_cost(self, usages):
        from model import Service, Category, ServicePrice
        from task.notifications import notify_managers_about_new_service_in_tariff

        total_cost = Decimal()
        tariff = self.tariff
        if not tariff:
            raise Exception("Tariff is not set for customer %s" % self)

        services = tariff.services_as_dict(lower=True)
        for usage in usages:
            db.session.add(usage)
            service_id = usage.service_id.lower() if isinstance(usage.service_id, str) else str(usage.service_id)
            service_price = services.get(service_id)
            service = Service.get_by_id(service_id)

            usage.tariff_id = tariff.tariff_id
            usage.customer_mode = self.customer_mode

            if service is None:
                logbook.error("Not found declaration service {} during calculate usage for {}",
                              usage.service_id, self)
                continue

            usage_volume = service.calculate_volume_usage(usage)
            usage.usage_volume = usage_volume

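            # Services missing from the tariff: VM flavors get a zero-price placeholder (and managers are notified); other categories are only logged.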
            if service_price is None:
                if service.category_id == Category.VM:
                    if service.deleted:
                        logbook.error("Service {} not found in {} for {}. But this service is archived",
                                      service_id, tariff, self)
                    else:
                        service_price = ServicePrice(service_id=service_id, price=Decimal(0), need_changing=True)
                        self.tariff.services.append(service_price)
                        services = tariff.services_as_dict(lower=True)
                        flavor_name = Service.get_by_id(service_id).flavor.flavor_id
                        notify_managers_about_new_service_in_tariff.delay(self.customer_id, flavor_name)
                else:
                    logbook.warning("Service {} not found in {} for {}. Allowed services: {}",
                                    service_id, tariff, self, services.keys())

            if service_price:
                usage_cost = usage_volume * service_price.price / service.hours
            else:
                usage_cost = Decimal(0)
            total_cost += usage_cost
            usage.cost = usage_cost

        logbook.info("Found {} usages for customer {}. Total cost of used resources is: {}",
                     len(usages), self, total_cost)

        return total_cost
Example #50
 def get_volume_type_list(self):
     if time.time() < self.cache_updated_at + self.UPDATE_INTERVAL:
         # avoid refreshing the cache too often
         return
     self.volume_types_cache = {
         vtype.id: vtype.name
         for vtype in openstack.client_cinder.volume_types.list()
     }
     if not self.volume_types_cache:
         logbook.error("Volume type list if empty")
     logbook.info("Got list of volume types: {}", self.volume_types_cache)
     self.cache_updated_at = time.time()
Example #51
def process(doc):
    '''
    count the suffix of mail
    '''
    mail = doc['mail_0']
    if '@' not in mail:
        logbook.error(mail)
        return
    suffix = mail.split('@')[1]
    RDS.setnx(suffix, 0)
    RDS.incr(suffix)
    logbook.info('process: {}'.format(mail))
Example #52
def _notify(app_notifiers, title, body, retry):
    for notifier in app_notifiers:
        while True:
            try:
                notifier.notify(title, body)
            except socket.error:
                if not retry:
                    raise
                logbook.error("Socket error: {}", traceback.format_exc())
                sleep(5)
            else:
                break
Example #53
    def update_from_response(self, url, response, content):
        try:
            r = super(RemoteObject,
                      self).update_from_response(url, response, content)
        except:
            logbook.error(u"Received error response: %r, %s" %
                          (response, content))
            raise

        self._rate_limit = (response.get('x-ratelimit-remaining'),
                            response.get('x-ratelimit-limit'))

        return r
Example #54
    def release(self):
        if not self.acquired:
            logbook.debug("Release skipping for {}", self)
            return False

        res = self._delete_lock_cmd([self.key], [self._token])
        acquiring_time = time.time() - self.acquired
        self.acquired = None
        if res:
            logbook.info("{} is released. Acquiring was {:.3f} seconds", self, acquiring_time)
            return True
        logbook.error("{} release failed {}. Acquired was {:.3f} seconds ago", self, res, acquiring_time)
        return False
Example #55
def send_mail(fromaddr, recipients, message, secure=False):
    with smtpd_context() as client:
        try:
            client.ehlo()
            if secure:
                logbook.debug("Starting TLS...")
                client.starttls()
                logbook.debug("TLS initiated")
            client.sendmail(fromaddr, recipients, message)
        except:
            logbook.error("Error while sending email", exc_info=True)
            client.close()
            raise