Example #1
def api_state(data_dragon: DataDragon, remote_url: Optional[str]):
  # Resolve the DataDragon checkout to inspect: this repository when running
  # locally, otherwise the checkout behind the remote URL.
  if remote_url is None:
    data_dragon_path = Path(__file__).parent.parent
  else:
    data_dragon_path = Path(data_dragon.remote_data_dragon_path(remote_url=remote_url)).parent
  script = f'''
set -e
cd {process.escape_run_arg(run_arg=data_dragon_path)}
echo '––– Code state'
git remote update
git --no-pager diff
git --no-pager log -n 1
echo '––– Python state'
source python/environment/bin/activate
pip freeze
echo '––– Node state'
cat package-lock.json
echo '––– Status'
git status
'''
  run_args = process.script_command(script=script)
  _, out, error = data_dragon.run_remote_process(
    run_args=run_args,
    remote_url=remote_url or '',
    confirm=False,
    capture_output=True,
    shell=True
  )
  newline = '\n'  # f-string expressions may not contain backslashes before Python 3.12
  state = f'{out.decode()}{f"{newline}Error output:{newline}{error.decode()}" if error else ""}'
  state_path = Path(__file__).parent.parent / 'output' / 'state' / f'api_state_{data_dragon.user.safe_file_name(name=remote_url if remote_url else "local")}_{data_dragon.user.date_file_name()}.txt'
  state_path.write_text(state)
  log.log(f'{state}\nAPI state written to {state_path}')
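The capture-and-record shape above is reusable beyond DataDragon. A minimal standalone sketch using only the standard library (capture_state and its parameters are illustrative, not part of the original API):

import subprocess
from datetime import datetime
from pathlib import Path

def capture_state(script: str, output_dir: Path) -> Path:
    # Run the script through a shell, capturing both output streams.
    result = subprocess.run(['bash', '-c', script], capture_output=True)
    state = result.stdout.decode()
    if result.stderr:
        state += f'\nError output:\n{result.stderr.decode()}'
    output_dir.mkdir(parents=True, exist_ok=True)
    path = output_dir / f'state_{datetime.now():%Y-%m-%d_%H-%M-%S}.txt'
    path.write_text(state)
    return path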
Example #2
 def run_query(query: SQL.Query, confirmation: Optional[str] = 'Run this migration query?') -> Optional[Any]:
   # `data_dragon` and `layer` are captured from the enclosing scope.
   log.log(query.substituted_query)
   if confirmation is not None and not data_dragon.user.present_confirmation(
     confirmation.format(query=query),
     default_response=True
   ):
     return None
   return query.run(sql_layer=layer)
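The confirm-before-run gate depends on DataDragon's user interactor; a standalone equivalent of the same pattern (run_with_confirmation is a hypothetical name) could be as small as:

def run_with_confirmation(action, prompt='Proceed?'):
    # Ask on stdin; an empty answer defaults to yes.
    answer = input(f'{prompt} [Y/n] ').strip().lower()
    if answer in ('', 'y', 'yes'):
        return action()
    return None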
Example #3
  def _getRawReport(self, startDate, endDate, granularity, api, campaign, adGroupIDs):
    tries = 3
    while True:
      try:
        granularity = RuleReportGranularity(granularity)
        # Request rows ordered by impressions, in pages of up to 1000.
        selector = {
          "orderBy": [
            {
              "field": "impressions",
              "sortOrder": "DESCENDING"
            }
          ],
          "pagination": {
            "offset": 0, "limit": 1000
          }
        }
        if adGroupIDs is not None:
          selector["conditions"] = [{
            "field": "adGroupId",
            "operator": "IN",
            "values": adGroupIDs,
          }]

        date_format = "%Y-%m-%d"
        start_date_string = startDate.strftime(date_format)
        end_date_string = endDate.strftime(date_format)

        frames = self._get_report_function(api)(
          campaign=campaign,
          start_time=start_date_string,
          end_time=end_date_string,
          granularity=granularity.value,
          return_records_with_no_metrics=False,
          return_row_totals=False,
          selector=selector,
        )

        frames["campaignName"] = campaign.name
        frames["orgId"] = campaign._org_id
        frames["orgName"] = api.name

        report = pd.concat([frames], sort=True)
        if report.empty:
          return report

        report.date = report.date.apply(lambda d: datetime.strptime(d, granularity.dateFormatString))
        return report
      except (SystemExit, KeyboardInterrupt):
        raise
      except Exception as e:
        tries -= 1
        if not tries:
          raise
        log.log(f'Apple Search Ads report exception {repr(e)}\n\nWill retry after 30 seconds...')
        sleep(30)
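The bounded retry loop above is a reusable shape; extracted into a generic helper (with_retries and its defaults are illustrative):

import time
import traceback

def with_retries(operation, tries=3, delay_seconds=30):
    # Retry transient failures, but never swallow exit requests.
    while True:
        try:
            return operation()
        except (SystemExit, KeyboardInterrupt):
            raise
        except Exception:
            tries -= 1
            if not tries:
                raise
            traceback.print_exc()
            time.sleep(delay_seconds)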
Example #4
def refresh_tags(schema: str):
    query = RefreshTagsQuery(schema=schema)
    sql_layer = SQL.Layer()
    sql_layer.connect()
    query.run(sql_layer=sql_layer)
    # Read server notices before committing and disconnecting.
    notices = sql_layer.connection.notices
    sql_layer.commit()
    sql_layer.disconnect()
    newline = '\n'
    log.log(f'Refreshed tags: {newline.join(notices)}'
            if notices else 'Refreshed tags.')
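SQL.Layer wraps a database connection; assuming a psycopg2-style driver (whose connection.notices list collects server messages such as RAISE NOTICE output), the same idea standalone:

import psycopg2

def run_and_collect_notices(dsn: str, statement: str) -> list:
    connection = psycopg2.connect(dsn)
    try:
        with connection.cursor() as cursor:
            cursor.execute(statement)
        connection.commit()
        # psycopg2 appends server notices here as plain strings.
        return list(connection.notices)
    finally:
        connection.close()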
Example #5
 def wrapper(*args, **kwargs):
   # `f`, `enabled`, `pdb_enabled`, `queue`, and `is_exiting` are captured
   # from the enclosing decorator's scope.
   while True:
     if is_exiting:
       break
     try:
       return f(*args, **kwargs)
     except (KeyboardInterrupt, SystemExit):
       raise
     except MicraStopRetry:
       raise
      except Exception:
       if enabled:
         traceback.print_exc()
         if pdb_enabled:
           pdb.post_mortem()
           if not click.confirm('Continue', default=True):
             if queue is not None:
               queue.put('q', block=False)
             raise MicraQuit
       else:
         raise
     log(f'Retrying {f.__name__}...')
     time.sleep(1)
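wrapper is the inner function of a retry decorator. A self-contained decorator built around the same loop, minus the pdb and queue hooks (retry_forever is an illustrative name, not the original):

import functools
import time
import traceback

def retry_forever(enabled=True):
    # Decorator factory: retry the wrapped callable once per second until it
    # succeeds, printing tracebacks when `enabled` is set.
    def decorator(f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            while True:
                try:
                    return f(*args, **kwargs)
                except (KeyboardInterrupt, SystemExit):
                    raise
                except Exception:
                    if not enabled:
                        raise
                    traceback.print_exc()
                print(f'Retrying {f.__name__}...')
                time.sleep(1)
        return wrapper
    return decorator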
Example #6
def ux_state(data_dragon: DataDragon, remote_url: str):
  # The locator convention stores the branch in the URL fragment; split it off
  # and drop it from the remote URL.
  url, _ = ResourceLocator.strip_locator_parameters(url=ResourceLocator.dealias_url(url=remote_url))
  parts = urllib.parse.urlparse(url)
  remote = urllib.parse.urlunparse((*parts[:5], ''))
  branch = parts.fragment
  datadragon_ux_path = Path(__file__).parent.parent / 'angular-datadragon'
  script = f'''
set -e
cd {process.escape_run_arg(datadragon_ux_path)}
git fetch -f {process.escape_run_arg(remote)} {process.escape_run_arg(branch)}:_ux_state
git --no-pager log -n 1 _ux_state
'''
  run_args = process.script_command(script=script)
  _, out, error = data_dragon.run_local_process(
    run_args=run_args,
    confirm=False,
    capture_output=True,
    shell=True
  )
  newline = '\n'
  state = f'{out.decode()}{f"{newline}Error output:{newline}{error.decode()}" if error else ""}'
  state_path = Path(__file__).parent.parent / 'output' / 'state' / f'ux_state_{data_dragon.user.safe_file_name(name=remote_url)}_{data_dragon.user.date_file_name()}.txt'
  state_path.write_text(state)
  log.log(f'{state}\nUX state written to {state_path}')
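The branch-in-fragment convention works with the standard library alone; split_remote_and_branch below is an illustrative helper:

import urllib.parse

def split_remote_and_branch(url: str):
    # 'https://host/repo.git#feature' -> ('https://host/repo.git', 'feature')
    parts = urllib.parse.urlparse(url)
    remote = urllib.parse.urlunparse((*parts[:5], ''))  # drop the fragment
    return remote, parts.fragment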
Example #7
    def run_almacen(self, job: Job, use_legacy: bool = False):
        print(f'Running job on almacen: {job._contents}')
        job.ran = datetime.utcnow()
        job.host = socket.gethostname()
        job._put(redis=self.redis)

        if self.dry_run:
            log('Dry run: pinging example.com...')
            # run_args = ['development_packages/moda/moda/test/test_input_output_error.sh']
            run_args = [
                'ping',
                'example.com',
                '-c',
                '3',
            ]
        else:
            # Left-truncate the joined path so derived file names stay within
            # filesystem length limits.
            job_path = os.path.realpath(
                os.path.join(
                    'output', 'job',
                    UserInteractor.safe_file_name(name=job._name))[-200:])
            job_configuration_path = f'{job_path}.json'
            job_result_path = f'{job_path}_result.json'
            with open(job_configuration_path, 'w') as f:
                json.dump(job.configuration, f, sort_keys=True)
            run_args = [
                os.path.join('scripts', 'almacen.sh'),
                self.config['workers']['almacen']['worker_path'],
                '-db',
                self.config['workers']['almacen']['database'],
                '-be',
                '-rt',
                '1',
                'company',
                '-c',
                job_configuration_path,
                '-o',
                job_result_path,
                'fill',
            ]
        if use_legacy:
            process = None

            def on_output(subprocess, *args):
                nonlocal process
                if process is None:
                    process = subprocess
                    self.queue.put(
                        f'subprocess {process.pid} set {shlex.quote(Coordinator.subprocess_command(run_args=run_args))}'
                    )

            return_code, _, __ = run_process_combined(run_args=run_args,
                                                      on_output=on_output,
                                                      echo=True)
            self.queue.put(f'subprocess {process.pid} clear')
        else:
            process, _, generator = run_process(run_args=run_args, echo=True)
            self.queue.put(
                f'subprocess {process.pid} set {shlex.quote(Coordinator.subprocess_command(run_args=run_args))}'
            )
            try:
                while True:
                    output_bytes, __, ___ = next(generator)
                    # Answer interactive prompts (output ending in ': ') with a
                    # throwaway 'x' response so the subprocess never blocks.
                    if output_bytes.endswith(b': '):
                        generator.send(b'x\n')
            except StopIteration as e:
                return_code = e.value
                self.queue.put(f'subprocess {process.pid} clear')

        job.finished = datetime.utcnow()
        job.result = return_code
        # job_result_path is only defined outside dry runs.
        if return_code == 0 and not self.dry_run:
            with open(job_result_path) as f:
                run_configuration = json.load(f)
            job.configuration = run_configuration[job_configuration_path]
        print(f'Job finished with code {job.result}. {job._name}')
        return job
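The non-legacy branch leans on a Python detail: a generator's return value surfaces as StopIteration.value for the consumer. In isolation (process_output is illustrative):

def process_output():
    # Yield output chunks, then return a final exit code.
    yield b'starting\n'
    yield b'done\n'
    return 0

generator = process_output()
try:
    while True:
        print(next(generator))
except StopIteration as e:
    return_code = e.value  # 0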
Example #8
def api_deploy(ctx: Any, data_dragon: DataDragon, should_stop: bool, should_start: bool, should_dump: bool, should_migrate: bool, should_install: bool, pull_branch: Optional[str], remote_url: Optional[str]):
  if should_stop:
    log.log('––> Stopping API')
    if remote_url is not None:
      ctx.invoke(
        remote,
        _confirm=False,
        remote_url=remote_url,
        remote_command=('api', 'stop')
      )
    else:
      ctx.invoke(api_stop)
  if pull_branch:
    log.log(f'––> Pulling branch {pull_branch}')
    ctx.invoke(
      api_pull,
      pull_branch=pull_branch,
      remote_url=remote_url
    )
  if should_install:
    log.log('––> Installing')
    ctx.invoke(
      api_install,
      remote_url=remote_url
    )

  # After pulling or installing (or when targeting a remote), forward the
  # remaining steps to a fresh invocation so they run against the updated code.
  if remote_url or pull_branch or should_install:
    remaining_options = []
    if should_dump:
      remaining_options.append('-b')
    if should_migrate:
      remaining_options.append('-m')
    if should_start:
      remaining_options.append('-s')
    if remaining_options:
      ctx.invoke(
        remote,
        _confirm=False,
        remote_url=remote_url or str(Path(__file__).parent.parent),
        remote_command=('api', 'deploy', *remaining_options)
      )
    return

  if should_dump:
    log.log('––> Backing up data')
    ctx.invoke(
      data,
      _moda_subcommand=dump,
    )
    log.log('––> Backing up files')
    ctx.invoke(
      files,
      _moda_subcommand=files_dump,
    )
  if should_migrate:
    log.log('––> Migrating data')
    ctx.invoke(
      data,
      _moda_subcommand=migrate,
    )
    log.log('––> Migrating files')
    ctx.invoke(
      files,
      _moda_subcommand=files_migrate,
    )
  if should_start:
    log.log('––> Starting API')
    ctx.invoke(api_start)
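The chaining here is click's Context.invoke, which calls a sibling command's callback with its defaults filled in. A self-contained miniature:

import click

@click.group()
def cli():
    pass

@cli.command()
def stop():
    click.echo('stopping')

@cli.command()
@click.pass_context
def deploy(ctx):
    # Programmatically invoke a sibling command, as api_deploy does above.
    ctx.invoke(stop)
    click.echo('deploying')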
Example #9
def sync(ctx: Any, data_dragon: DataDragon, should_stop: bool, should_migrate: bool, should_install: bool, should_start: bool, force_dry_rule_runs: bool, pull_branch: Optional[str], ux_deployment_url: Optional[str], suffix: str, include_collections: Tuple[str], exclude_collections: Tuple[str], should_check_status: bool, source_url: str, destination_url: str):
  if should_stop:
    log.log(f'––> Stopping DataDragon API on {destination_url}')
    ctx.invoke(
      remote,
      remote_url=destination_url,
      remote_command=('api', 'stop')
    )
  if ux_deployment_url:
    ctx.invoke(
      ux,
      _moda_subcommand=ux_deploy,
      _moda_subcommand_parameters={
        'remote_url': ux_deployment_url,
      }
    )
    
  if pull_branch:
    log.log(f'––> Pulling branch {pull_branch} for the DataDragon API on {destination_url}')
    ctx.invoke(
      api,
      _moda_subcommand=api_pull,
      _moda_subcommand_parameters={
        'pull_branch': pull_branch,
        'remote_url': destination_url,
      },
    )
  
  if should_install:
    log.log(f'––> Installing DataDragon API dependencies on {destination_url}')
    ctx.invoke(
      api,
      _moda_subcommand=api_install,
      _moda_subcommand_parameters={
        'remote_url': destination_url,
      },
    )

  log.log(f'––> Dumping data on {source_url}')
  ctx.invoke(
    remote,
    remote_url=source_url,
    remote_command=('data', '-s', suffix, 'dump')
  )

  log.log(f'––> Dumping files on {source_url}')
  ctx.invoke(
    remote,
    remote_url=source_url,
    remote_command=('files', '-s', suffix, 'dump')
  )

  if not data_dragon.remote_url_is_this_instance(remote_url=source_url):
    log.log(f'––> Pulling data from {source_url}')
    ctx.invoke(
      data,
      suffix=suffix,
      _moda_subcommand=pull,
      _moda_subcommand_parameters={
        'remote_url': source_url,
        'include_collections': include_collections,
        'exclude_collections': exclude_collections,
      }
    )

    log.log(f'––> Pulling files from {source_url}')
    ctx.invoke(
      files,
      suffix=suffix,
      _moda_subcommand=files_pull,
      _moda_subcommand_parameters={
        'remote_url': source_url,
      }
    )

  if not data_dragon.remote_url_is_this_instance(remote_url=destination_url):
    log.log(f'––> Purging {suffix} data on {destination_url}')
    ctx.invoke(
      remote,
      remote_url=destination_url,
      remote_command=('data', '-s', suffix, 'purge')
    )

    log.log(f'––> Purging {suffix} files on {destination_url}')
    ctx.invoke(
      remote,
      remote_url=destination_url,
      remote_command=('files', '-s', suffix, 'purge')
    )

    log.log(f'––> Pushing data to {destination_url}')
    ctx.invoke(
      data,
      suffix=suffix,
      _moda_subcommand=push,
      _moda_subcommand_parameters={
        'remote_url': destination_url,
      }
    )

    log.log(f'––> Pushing files to {destination_url}')
    ctx.invoke(
      files,
      suffix=suffix,
      _moda_subcommand=files_push,
      _moda_subcommand_parameters={
        'remote_url': destination_url,
      }
    )

  if should_check_status:
    log.log(f'––> Checking DataDragon API status on {destination_url}')
    ctx.invoke(
      remote,
      remote_url=destination_url,
      remote_command=('api', 'status')
    )

  log.log(f'––> Restoring data on {destination_url}')
  ctx.invoke(
    remote,
    remote_url=destination_url,
    remote_command=('data', '-s', suffix, 'restore')
  )

  if should_migrate:
    log.log(f'––> Migrating data on {destination_url}')
    ctx.invoke(
      remote,
      remote_url=destination_url,
      remote_command=('data', 'migrate')
    )

  if force_dry_rule_runs:
    log.log(f'––> Migrating rules to dry run on {destination_url}')
    ctx.invoke(
      remote,
      remote_url=destination_url,
      remote_command=('data', 'migrate', '--from', 'any', '--to', 'dry_run_only')
    )

  log.log(f'––> Restoring files on {destination_url}')
  ctx.invoke(
    remote,
    remote_url=destination_url,
    remote_command=('files', '-s', suffix, 'restore')
  )

  if should_migrate:
    log.log(f'––> Migrating files on {destination_url}')
    ctx.invoke(
      remote,
      remote_url=destination_url,
      remote_command=('files', 'migrate')
    )

  if force_dry_rule_runs:
    log.log(f'––> Verifying dry run status on {destination_url}')
    _, output, _ = ctx.invoke(
      remote,
      remote_url=destination_url,
      remote_command=('configure', 'get', 'dry_run_only'),
      _confirm=False,
      _capture_output=True
    )
    assert output.decode() == 'true\n'

  if should_migrate:
    log.log('––> Verifying migration')
    # A failed verification is reported but should not abort the remaining steps.
    try:
      ctx.invoke(
        verify,
        _moda_subcommand=migration,
      )
    except click.ClickException:
      pass

  if should_start:
    log.log(f'––> Starting DataDragon API on {destination_url}')
    ctx.invoke(
      remote,
      remote_url=destination_url,
      remote_command=('api', 'start')
    )
Example #10
def run():
    # The single CLI argument is a JSON-encoded payload naming the command.
    args = json.loads(sys.argv[1])
    from moda import log
    log.set_message_logger(
        lambda message, end: print(json.dumps({'log': message + end})))
    with open(Path(__file__).parent.parent / 'configure.json') as f:
        configure = json.load(f)
    command = Command(args['command'])

    if command is Command.orgs:
        channel = channel_factory(channel_identifier=args['channel'])
        with channel.connected(credentials=prepare_credentials(args)):
            orgs = channel.get_entities(entity_type=ChannelEntity.org)
        print(json.dumps({'data': [{
            **d,
            'id': d['id'],
        } for d in orgs]}))

    elif command is Command.campaigns:
        channel = channel_factory(channel_identifier=args['channel'])
        with channel.connected(credentials=prepare_credentials(args)):
            campaigns = [
                campaign for org in args['orgs']
                for campaign in channel.get_entities(
                    entity_type=ChannelEntity.campaign,
                    parent_ids={ChannelEntity.org: str(org['id'])})
            ]
        print(
            json.dumps({
                'data': [{
                    **d,
                    'org_id': d['org_id'],
                    'id': d['id'],
                } for d in campaigns]
            }))

    elif command is Command.adgroups:
        channel = channel_factory(channel_identifier=args['channel'])
        with channel.connected(credentials=prepare_credentials(args)):
            ad_groups = channel.get_entities(
                entity_type=ChannelEntity.ad_group,
                parent_ids={
                    ChannelEntity.org: str(args['orgID']),
                    ChannelEntity.campaign: str(args['campaignID'])
                })
        print(
            json.dumps({
                'data': [{
                    **d,
                    'org_id': d['org_id'],
                    'campaign_id': d['campaign_id'],
                    'id': d['id'],
                } for d in ad_groups]
            }))

    elif command is Command.execute_rule:
        from io_map import IOMap
        IOMap.map_auto_register = True
        from .rule_executor import RuleExecutor
        date_format = '%Y-%m-%d'
        rule_executor = RuleExecutor(options=args['dbConfig'])
        rule = rule_executor.get_rule(rule_id=args['ruleID'])
        if configure['dry_run_only'] is not False or args.get('dryRunOnly'):
            if rule._id in configure['non_dry_run_rule_ids']:
                log.log(
                    f'Allowing non dry run for rule {rule._id} despite dry_run_only configuration because rule is listed in the non_dry_run_rule_ids configuration.'
                )
            elif rule._id in args.get('nonDryRunRuleIDs', []):
                log.log(
                    f'Allowing non dry run for rule {rule._id} despite dry_run_only configuration because rule is listed in the nonDryRunRuleIDs argument.'
                )
            else:
                if not rule.dryRun:
                    log.log(
                        f'Forcing dry run for rule {rule._id} due to dry_run_only configuration.'
                    )
                rule.dryRun = True
        result = rule_executor.execute(
            credentials=prepare_credentials(args),
            rule=rule,
            granularity=args['granularity'],
            start_date=datetime.strptime(args['startDate'], date_format),
            end_date=datetime.strptime(args['endDate'], date_format))

        print(json.dumps({
            "result": result,
        }, cls=RuleSerializer))

    elif command is Command.impact_report:
        from io_map import IOMap
        IOMap.map_auto_register = True
        from .rule_executor import RuleExecutor
        date_format = '%Y-%m-%d'
        credentials = prepare_credentials(args)
        rule_id = args['ruleID']
        report_id = args['reportID']

        rule_executor = RuleExecutor(options=args['dbConfig'])
        rule = rule_executor.get_rule(rule_id=rule_id)
        report_metadata = rule_executor.get_impact_report_metadata(
            credentials=credentials, rule=rule)

        print(
            json.dumps({
                'result': {
                    'reportId': report_id,
                    'granularity': report_metadata.granularity.value
                }
            }))
        if report_metadata.is_valid:
            report = rule_executor.get_impact_report(credentials=credentials,
                                                     rule=rule)
            # The report rows are already JSON; splice them in without re-encoding.
            print(
                f'{{"result":{{"reportId": "{report_id}", "rows": {report.to_json(orient="records")}}}}}'
            )

        # Wait for a line on stdin before exiting.
        input()

    elif command is Command.channel_report:
        fetcher = ChannelPerformanceFetcher(
            raw_channel=args['channel'],
            raw_time_granularity=args['time_granularity'],
            raw_entity_granularity=args['entity_granularity'],
            raw_performance_columns=[])
        start = datetime.fromtimestamp(args['start'])
        end = datetime.fromtimestamp(args['end'])
        report = fetcher.run(credentials=prepare_credentials(args),
                             start=start,
                             end=end)
        print(json.dumps({
            'data': report.to_csv(index=False),
        }))

    elif command is Command.report:
        from .report import get_metadata_report
        result = get_metadata_report(columns=args['columns'],
                                     filters=args['filters'],
                                     options=args['options'],
                                     credentials=prepare_credentials(args))
        print(
            json.dumps({
                'result': {
                    'metadata': result['metadata'],
                    'report': result['report'].to_csv(index=False),
                },
            }))

    elif command is Command.entity_process:
        from .entity import process_entities
        result = process_entities(operations=args['operations'],
                                  context=args['context'],
                                  credentials=prepare_credentials(args))
        print(json.dumps({
            'result': result,
        }))

    else:
        raise ValueError('Unsupported command', command)
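The worker speaks a small JSON protocol: one JSON-encoded argument in via argv, JSON objects out via stdout ('log' for progress, 'data'/'result' for payloads). A miniature of the dispatch shape (the commands here are illustrative):

import json
import sys

def run():
    args = json.loads(sys.argv[1])
    command = args['command']
    if command == 'echo':
        print(json.dumps({'result': args.get('payload')}))
    elif command == 'status':
        print(json.dumps({'log': 'checking...\n'}))
        print(json.dumps({'result': 'ok'}))
    else:
        raise ValueError('Unsupported command', command)

if __name__ == '__main__':
    run()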