Example #1
import argparse
import json
import sys

# config, setup, and CommandLineException are assumed to be provided by the
# surrounding project; they are not standard-library modules.


def cli_teardown(argv):
  """Command-line interface for tearing down the remote environment."""
  parser = argparse.ArgumentParser(
      prog="bazel_bf teardown",
      description="Tear down the remote environment entirely.")
  parser.add_argument("--force", action='store_true')

  args = parser.parse_args(argv)

  lambda_config = config.read_config()

  if not args.force:
    print "Configuration is: " + json.dumps(
        lambda_config, indent=2, sort_keys=True)
    sys.stdout.write("Confirm tearing down the remote environment? [yes/No] ")
    choice = input().lower()
    if choice == "yes":
      print "Proceeding..."
    else:
      raise CommandLineException("Abort!")

  (next_lambda_config, err) = setup.teardown(lambda_config)
  config.write_config(next_lambda_config)

  if err:
    raise CommandLineException(
        "Errors were encountered during tear down: " +
        "some resources might not have been properly deleted")
Example #2
            files = {
                'file': (batch_name, json.dumps(items), 'application/json'),
                'context': context,
            }
            logging.info(f"POST: sending '{batch_name}' to {url}")
            with session1.post(url, files=files, stream=True) as r:
                if r.status_code >= 300:
                    raise Exception(f"Failed {batch_name} with status {r.status_code}:\n\n{r.json()}")

                results_file_name = f'{batch_name}-results.json'
                logging.info(f"Extracting '{results_file_name}' into the 'results' folder.")
                parser = StreamingFormDataParser(headers=r.headers)
                masked_batch = ValueTarget()
                parser.register('file', masked_batch)
                parser.register('results', FileTarget(f'results/{results_file_name}'))
                # Without an explicit chunk size, iter_content yields one
                # byte at a time, which is needlessly slow for streaming.
                for chunk in r.iter_content(4096):
                    parser.data_received(chunk)

            masked_items = json.loads(masked_batch.value)
            # The batch writer buffers puts, sends them in batches, and
            # automatically retries any unprocessed items.
            with target_table.batch_writer() as batch:
                for item in masked_items:
                    batch.put_item(Item=item)

            # Standard boto3 Scan pagination: keep going until the response
            # no longer carries a LastEvaluatedKey.
            start_key = response.get('LastEvaluatedKey')
            done = start_key is None
            if not done:
                scan_kwargs['ExclusiveStartKey'] = start_key
                response = table.scan(**scan_kwargs)
                items = response.get('Items', [])
    finally:
        teardown(session1)
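
# The fragment above is the tail of a Scan pagination loop. Below is a
# hedged, self-contained sketch of the standard boto3 pattern it follows;
# scan_all_items is a hypothetical helper, not part of the original code.
def scan_all_items(table):
    """Yield every item in a DynamoDB table, following LastEvaluatedKey."""
    scan_kwargs = {}
    while True:
        response = table.scan(**scan_kwargs)
        yield from response.get('Items', [])
        start_key = response.get('LastEvaluatedKey')
        if start_key is None:
            break
        scan_kwargs['ExclusiveStartKey'] = start_key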
Example #3
            filename = askopenfilename()
            if not filename:
                break
            basename = ntpath.basename(filename)
            process_files = [(filename, 'masked')]
            for file_name, masked_folder in process_files:
                # Open via a context manager so the upload handle is closed
                # even when the request fails.
                with open(file_name, 'rb') as upload:
                    files = {'file': upload, 'context': context}
                    logging.info(f"POST: sending '{file_name}' to {url}")
                    with s.post(url, files=files, stream=True) as r:
                        if r.status_code >= 300:
                            logging.error(
                                f"Failed with status {r.status_code}:\n\n{r.json()}"
                            )
                            break

                        logging.info(
                            f"Extracting 'masked_{basename}' and "
                            f"'masked_{basename}_results.json' into {masked_folder}."
                        )
                        parser = StreamingFormDataParser(headers=r.headers)
                        parser.register(
                            'file',
                            FileTarget(f'{masked_folder}/masked_{basename}'))
                        parser.register(
                            'results',
                            FileTarget(
                                f'{masked_folder}/masked_{basename}_results.json'))
                        for chunk in r.iter_content(4096):
                            parser.data_received(chunk)
    finally:
        teardown(s)
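
# setup() and teardown() are referenced throughout these examples but never
# defined here. A purely hypothetical minimal shape, for illustration only:
def setup(session):
    # e.g. authenticate and attach default headers to the shared session
    session.headers.update({'Authorization': 'Bearer <token>'})

def teardown(session):
    # e.g. release any server-side resources, then close the session
    session.close()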
Example #4
  lines = args.lines
  iterations = args.iterations
  test_folder = 'test-files'
  file_name = f'test-{lines}.csv'
  file_path = f'{test_folder}/{file_name}'
  os.makedirs(test_folder, exist_ok=True)
  if not os.path.exists(file_path):
    logging.info(f'Creating {file_name}...')
    # newline='' disables newline translation so each written line is exactly
    # 1000 characters long, including the os.linesep terminator.
    with open(file_path, 'w', newline='') as f:
      test = 'this is a test'
      char_num = 1000 - len(test) - len(os.linesep)
      chars_per_col = 10
      num_cols = char_num // chars_per_col
      remaining_padding = 'x' * (char_num % chars_per_col)
      # Comma-separated 10-character columns, padded so the marker text sits
      # at the end of the line.
      line = ','.join(['x' * (chars_per_col - 1) for _ in range(num_cols)] +
                      [f'{remaining_padding}{test}'])
      for _ in range(lines):
        f.write(line)
        f.write(os.linesep)
    logging.info(f'Created {file_name}.')
  with requests.Session() as session:
    try:
      setup(session)
      context = json.dumps({
          "fileSearchContextName": file_search_context_name,
          "fileMaskContextName": file_mask_context_name
      })
      utils.benchmark_search_mask(session, file_path, context,
                                  lines, 'text/csv', iterations)
    finally:
      teardown(session)
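
# A hedged sketch of the argument parsing this snippet relies on; the flag
# names mirror the attributes used above, but the defaults are assumptions.
import argparse

parser = argparse.ArgumentParser(
    description='Benchmark search/mask against a generated CSV file.')
parser.add_argument('--lines', type=int, default=10000,
                    help='number of 1000-character lines in the test file')
parser.add_argument('--iterations', type=int, default=1,
                    help='number of benchmark iterations')
args = parser.parse_args()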