Example #1
    def setUp(self) -> None:
        args = '--data=mnist --imsize=32 ' \
               '--model=gan --encoder=conv --synthesis=style --hdim=32 --zdim=32 ' \
               '--epochs=30 --bsz=8 ' \
               '--r1=1 '
        self.args = utils.parser.parse_args(args.split())
        utils.setup(self.args)
Example #2
def setup():
    """Setup for upstream plugin"""
    models.setup()
    utils.setup()
    addbook.setup()
    covers.setup()
    merge_authors.setup()

    import data
    data.setup()

    # setup template globals
    from openlibrary.i18n import ugettext, ungettext, gettext_territory

    web.template.Template.globals.update({
        "gettext": ugettext,
        "ugettext": ugettext,
        "_": ugettext,
        "ungettext": ungettext,
        "gettext_territory": gettext_territory,
        "random": random.Random(),
        "commify": web.commify,
        "group": web.group,
        "storage": web.storage,
        "all": all,
        "any": any,
        "locals": locals
    });

    import jsdef
    web.template.STATEMENT_NODES["jsdef"] = jsdef.JSDefNode

    setup_jquery_urls()
Example #3
def main():
    print "Starting main()\n"
    u.setup()
    u.calibrate()
    a.hold_cube_and_deliver_ambulance()
    a.hold_cube_and_get_prism()
    u.sd()
Example #4
def main():
    args = parser.parse_args()
    args.timestamp = tools.get_timestamp()

    tools.mkdir_or_exist(args.workdir)
    tools.setup(args.benchmark, args.deterministic, args.seed)

    if args.gpu is not None:
        warnings.warn('You have chosen a specific GPU. This will completely '
                      'disable data parallelism.')

    if args.dist_url == "env://" and args.world_size == -1:
        args.world_size = int(os.environ["WORLD_SIZE"])

    args.distributed = args.world_size > 1 or args.multiprocessing_distributed

    ngpus_per_node = torch.cuda.device_count()
    if args.multiprocessing_distributed:
        # Since we have ngpus_per_node processes per node, the total world_size
        # needs to be adjusted accordingly
        args.world_size = ngpus_per_node * args.world_size
        # Use torch.multiprocessing.spawn to launch distributed processes: the
        # main_worker process function
        mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
    else:
        # Simply call main_worker function
        main_worker(args.gpu, ngpus_per_node, args)
Example #5
def setup():
    """Setup for upstream plugin"""
    models.setup()
    utils.setup()
    addbook.setup()
    covers.setup()
    merge_authors.setup()

    import data
    data.setup()

    # setup template globals
    from openlibrary.i18n import ugettext, ungettext, gettext_territory

    web.template.Template.globals.update({
        "gettext": ugettext,
        "ugettext": ugettext,
        "_": ugettext,
        "ungettext": ungettext,
        "gettext_territory": gettext_territory,
        "random": random.Random(),
        "commify": web.commify,
        "group": web.group,
        "storage": web.storage,
        "all": all,
        "any": any,
        "locals": locals
    });

    import jsdef
    web.template.STATEMENT_NODES["jsdef"] = jsdef.JSDefNode

    setup_jquery_urls()
Example #6
def main():
    script_start = str(datetime.datetime.now()).replace(':',
                                                        '-').replace(' ', 'T')

    args = utils.parse_args()

    args.script_start = script_start
    args_path = Path(f'args_{script_start}.json')
    with open(args_path, 'w') as f:
        json.dump(vars(args), f, indent=4)

    utils.setup(args.use_sb3, args.debug_nans)

    eval_seed = args.seed
    if eval_seed is not None:
        eval_seed += args.num_envs

    # ---------------- TRAINING STARTS HERE ----------------

    # Set up gym environment
    env = utils.make_env(args, include_norm=True)
    # Set up model
    model = setup_model(args, env)

    callbacks = []
    utils.append_callback(callbacks, utils.create_save_callback(args))
    utils.append_callback(callbacks, utils.create_eval_callback(args))

    dry_run(model, env, int(args.warmup_steps))
    env.seed(args.seed)

    start_time = time.perf_counter()
    # Train the model (need to put at least 100k steps to
    # see something)
    model.learn(total_timesteps=int(args.steps), callback=callbacks)
    duration = time.perf_counter() - start_time
    print(f'Training took {duration} seconds.')
    # env.envs[0].plot_rewards()
    print('Number of episodes in each environment:',
          [env_.num_episodes for env_ in env.envs])

    model_fname = Path(f'sdc_model_{args.model_class.lower()}_'
                       f'{args.policy_class.lower()}_{script_start}.zip')
    model.save(str(model_fname))

    env_fname = Path(f'sdc_env_{script_start}.pkl')
    utils.save_env(env_fname, env)

    # delete trained model to demonstrate loading, not really necessary
    # del model

    # ---------------- TESTING STARTS HERE ----------------

    fig_path = Path(f'results_{script_start}.pdf')

    run_tests(model, args, seed=eval_seed, fig_path=fig_path)
Example #7
def main():
    print "Starting main()\n"
    u.setup()
    # You only need to include this command if you want the tophats to sense better at the cost of speed.
    u.calibrate()
    a.get_gas_valve()
    print "Finished main\n"
    u.shutdown(86)
Example #8
def main():
    print "Starting main()\n"
    u.setup()
    u.calibrate()
    a.get_crates()
    a.put_crates_in_correct_zone()
    a.get_botguy()
    a.put_botguy_on_side()
    u.shutdown()
Example #9
def main():
	# Unpickles data from file, stores it as a list of six elements
	setup(isTraining=True, overwrite=False)
	print("done processing training data")
	data = unpickle("data/training.pickle")
	# A dictionary that keys every word in our vocabulary to an index
	train_vocab = data[0]
	# A list of the tweets that we will be training on (2914 tweets)
	train_sentences = data[1]
	# print("Sentences", len(sentences))
	# An embedding matrix that maps each word to a 300 Dimensional Embedding
	train_embeddings = tf.convert_to_tensor(data[2], tf.float32)
	# A dictionary that maps the index of a word to a list containing the indices of its 4 synonyms
	train_synonym_indices = tf.convert_to_tensor(data[3], tf.int32)

	# A list of sentiment labels corresponding to tweets; labels can be -1 (negative), 0 (objective), or 1 (positive)
	# (2914, 1)
	train_sentiment_labels = tf.convert_to_tensor(data[4], tf.float32)
	# A list of emotion labels corresponding to tweets; each label has 8 slots, where a 1 in that position corresponds to that
	# emotion being labelled. So, each tweet can be associated with several different emotions
	# Shape (2914, 8)
	train_emotion_labels = tf.convert_to_tensor(data[5], tf.float32)
	data = None

	model = Model("emotion_only_s")

	train(
		model,
		train_sentences,
		train_emotion_labels,
		train_sentiment_labels,
		train_embeddings,
		train_synonym_indices,
	)

	setup(isTraining=False, overwrite=False)
	print("done processing testing data")
	data = unpickle("data/testing.pickle")

	test_vocab = data[0]
	test_sentences = data[1]
	"""for i in range(len(train_sentences)):
		train_sentences[i] = tf.convert_to_tensor(train_sentences[i], tf.int32)"""
	test_embeddings = tf.convert_to_tensor(data[2], tf.float32)
	test_synonym_indices = tf.convert_to_tensor(data[3], tf.int32)
	test_sentiment_labels = tf.convert_to_tensor(data[4], tf.float32)
	test_emotion_labels = tf.convert_to_tensor(data[5], tf.float32)
	data = None

	test(
		model,
		test_sentences,
		test_emotion_labels,
		test_sentiment_labels,
		test_embeddings,
		test_synonym_indices,
	)
Example #10
def main():
    script_start = str(datetime.datetime.now()).replace(':',
                                                        '-').replace(' ', 'T')
    args = parse_args()
    utils.setup(True)

    eval_seed = args.seed
    if eval_seed is not None:
        eval_seed += 1

    th.manual_seed(args.seed)
    device = th.device('cuda') if th.cuda.is_available() else th.device('cpu')

    dataloader = th.utils.data.DataLoader(
        DataGenerator(
            args.M,
            args.lambda_real_interval,
            args.lambda_imag_interval,
        ),
        batch_size=args.batch_size,
    )

    model = PreconditionerPredictor(args.M).to(device)
    loss_func = th.nn.MSELoss()
    opt = th.optim.Adam(model.parameters(), lr=args.learning_rate)

    old_steps = 0  # no previously trained steps unless a checkpoint is loaded
    if args.model_path is not None:
        model, opt, old_steps = load_model(args.model_path, model, opt, device)
    model.train()
    steps = int(args.steps)
    steps_num_digits = len(str(steps))

    last_losses = th.zeros(100)
    for (step, (lams, min_diags)) in enumerate(dataloader):
        lams = lams.float().to(device)
        min_diags = min_diags.to(device)

        diags = model(lams)
        loss = loss_func(diags, min_diags)

        opt.zero_grad()
        loss.backward()
        opt.step()

        last_losses[step % 100] = loss.item()
        if step % 100 == 0:
            print(f'[{step:>{steps_num_digits}d}/{steps}] '
                  f'mean loss: {th.mean(last_losses[:step + 1]).item():.5f}')

        if step >= steps:
            break

    if steps > 0:
        cp_path = Path(f'sl_model_{script_start}.pt')
        save_model(cp_path, model, opt, steps + old_steps)
    fig_path = Path(f'sl_results_{script_start}.pdf')
    run_tests(model, device, args, seed=eval_seed, fig_path=fig_path)
Example #11
def main():
    print "Starting main()\n"
    u.setup()
    u.calibrate()
    a.get_ambulance()
    a.get_blocks()
    a.deliver_ambulance_and_blocks()
    #a.get_firefighters()
    #a.deliver_firefighters()
    u.shutdown()
Example #12
def main():
    print "Starting main()\n"
    u.setup()
    # You only need to include this command if you want the cliffs to sense better at the cost of speed.
    u.calibrate()
    a.get_left_coupler()
    a.go_to_magnets()
    u.reset_roomba()
    a.do_magnets()
    a.deliver_left_coupler()
    print "Finished main\n"
    u.shutdown()
Example #13
def main():
    """Run the application"""
    print('Starting application...')
    utils.setup()
    print('IP Address is {}'.format(utils.get_pi_network_ip_address()))
    print('Running application...')
    try:
        while True:
            for switch in SWITCHES:
                if switch.should_refresh_config():
                    print('Refreshing config')
                    utils.update_dnsmasq_config()
            time.sleep(0.1)
    except KeyboardInterrupt:
        print('Shutting down...')
Example #14
    def test_img_format(self):
        args = '--data=mnist --imsize=32  --bsz=8 '
        self.args = utils.parser.parse_args(args.split())
        utils.setup(self.args)

        ds_train, ds_val, ds_info = data.load_datasets(self.args)
        train_sample = next(iter(ds_train))
        val_sample = next(iter(ds_val))

        for sample in [train_sample, val_sample]:
            tf.debugging.assert_type(sample, tf.uint8)

            # Probabilistic asserts
            min_val, max_val = tf.reduce_min(sample), tf.reduce_max(sample)
            tf.debugging.assert_greater(max_val, 127 * tf.ones_like(max_val))
Example #15
def example():
    cx = utils.setup(FILENAME, -1)

    for a, b in SYMBOLS.items():
        cx.m.set_label(a, b)

    seven_segment.table(cx.m, 0x7e29, 0x7e39, verbose=False)

    ct = utils.cmd_tbl(cx, 0x7c5d, 0x7c91)
    cta = utils.arg_range(cx, ct, 0x7d65, 0x7d81)
    utils.cmd_dispatch(cx, cta, 0x644c)

    utils.key_dispatch(cx, 0x640c, 0x644c)

    utils.dsp_dispatch(cx, 0x6848, 0x6858)

    for i in (0x614c, 0x619c, 0x61a3, 0x69dd, 0x69e4):
        utils.float70(cx.m, i)

    utils.square_tbl(cx.m)

    utils.apply_labels(cx, "A")
    utils.tramp(cx)

    return NAME, (cx.m, )
Example #16
    def __init__(self,
                 data_tr,
                 data_te,
                 n,
                 d_type,
                 model_type='default',
                 trans=True,
                 encode="pc"):
        DNN.ngram = n
        self.tr_x, self.tr_y, self.te_x, self.te_y = utils.setup(
            data_tr, data_te, DNN.ngram, d_type, DNN.min_pitch, DNN.n_outputs,
            trans, encode)
        self.tr_x, self.val_x, self.tr_y, self.val_y = tts(self.tr_x,
                                                           self.tr_y,
                                                           test_size=0.2)
        if d_type == "pitch":
            DNN.n_outputs = DNN.max_pitch - DNN.min_pitch
            DNN.n_features = 20 * (DNN.ngram - 1)
        elif d_type == "rhythm":
            DNN.n_outputs = len(utils.r_dict)
            DNN.n_features = len(utils.r_dict) * (DNN.ngram - 1)
        self.X = tf.placeholder(tf.float32,
                                shape=[None, DNN.n_features],
                                name="X")
        self.y = tf.placeholder(tf.int32, shape=[None], name='y')
        if model_type == 'bn':
            self.is_training = tf.placeholder(tf.bool, name='is_training')
        self.model = model_type
        self.save_path = "dnn" + d_type + model_type + str(trans) + str(
            DNN.ngram) + "/best.ckpt"

        self.build()
Example #17
    def __init__(self):
        self.INPUT_BUCKET_NAME = INPUT_BUCKET_NAME
        self.OUTPUT_BUCKET_NAME = OUTPUT_BUCKET_NAME

        # Set the region in which the lab is running
        self.LAB_REGION = boto3.session.Session().region_name

        # Create S3 resource
        self.s3 = self.create_s3_resource()

        # Set up the input bucket and copy the CSV files. Also, set up the
        # output bucket
        self.bucketSource = s3setup.setup(inputbucket=self.INPUT_BUCKET_NAME,
                                          outputbucket=self.OUTPUT_BUCKET_NAME,
                                          region=self.LAB_REGION)

        inputbucket = self.s3.Bucket(self.INPUT_BUCKET_NAME)
        outputbucket = self.s3.Bucket(self.OUTPUT_BUCKET_NAME)

        # Get summary information for all objects in input bucket
        # Iterate over the list of object summaries
        for object_summary in inputbucket.objects.all():
            # Get the object key from each object summary
            csvkey = object_summary.key

            # Retrieve the object with the specified key from the input bucket
            self.download_file_from_bucket(inputbucket, csvkey)
Example #18
def main(args):
    cfg = utils.setup(args)

    command = [
        './out-perf.masstree/benchmarks/dbtest --bench {} --retry-aborted-transactions --parallel-loading --db-type ndb-ic3'
        ' --backoff-aborted-transactions --scale-factor {} --bench-opt "{}" --num-threads {} --runtime {} '
        .format(args.workload_type, args.scale_factor, args.bench_opt,
                args.nworkers, args.eval_time)
    ]

    erl = ERL(log_dir=cfg.get('log_directory'),
              log_rate=args.log_rate,
              kid_dir=cfg.get('kid_directory'),
              draw_graph=args.graph,
              command=command,
              sync_period=args.sync_period,
              learning_rate=args.learning_rate,
              rd=args.reward_decay,
              selection=args.selection,
              pop_size=args.psize,
              bfactor=args.random_branch,
              mutate_decay=args.mutate_decay,
              mut_frac=args.mutate_rate)

    erl.training(generations=args.max_iterations,
                 learning_rate=args.learning_rate,
                 samples_per_distribution=args.samples_per_distribution,
                 initial_policy=args.pickup_policy,
                 load_per_sample=args.load_per_sample)

    return
Example #19
def setup():
  """Configures dataset, model, backend, and runner."""
  if FLAGS.preprocess_outdir:
    outdir = FLAGS.preprocess_outdir
  elif FLAGS.outdir:
    outdir = FLAGS.outdir
  else:
    outdir = tempfile.mkdtemp()
  backend, ds, post_process, count = utils.setup(
      FLAGS.dataset, FLAGS.dataset_path, FLAGS.dataset_list, FLAGS.cache,
      outdir, FLAGS.space_to_depth_block_size,
      FLAGS.tpu_transpose, FLAGS.data_format, FLAGS.total_sample_count)
  final_results = {
      "runtime": backend.name(),
      "version": backend.version(),
      "time": int(time.time()),
  }

  if FLAGS.num_tpus == 1:
    runner = Runner(
        backend, ds, FLAGS.batch_size, FLAGS.threads, post_process=post_process)
  else:
    backend_lists = []
    for _ in range(FLAGS.num_tpus):
      backend_lists.append(BackendTensorflow())
    runner = MultiCloudTpuRunner(
        backend_lists,
        ds,
        FLAGS.batch_size,
        FLAGS.threads,
        post_process=post_process)
  return final_results, count, runner
Example #20
def simulate(args, config):
    # Set up pybullet env and policies
    envs, policy_manager, keep_alive = setup(args, config)
    signal(
        SIGINT, lambda sig, frame: exit_handler([
            value['exit_handler'] for key, value in keep_alive.items()
            if value['exit_handler'] is not None
        ]))

    observations = ray.get([e.reset.remote() for e in envs])
    observations = [obs for obs, _ in observations]

    inference_policies = policy_manager.get_inference_nodes()
    multiarm_motion_planner = inference_policies['multiarm_motion_planner']
    if args.mode == 'benchmark':
        multiarm_motion_planner.deterministic = True

    remaining_observations = []
    ready_envs = copy(envs)
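    # Loop forever: query the motion-planner policy for each pending observation and step the ready envs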
    while (True):
        env_actions = [
            multiarm_motion_planner.act(observation['multiarm_motion_planner'])
            for observation in observations
        ]
        ready_envs, observations, remaining_observations = step_env(
            all_envs=envs,
            ready_envs=ready_envs,
            ready_actions=env_actions,
            remaining_observations=remaining_observations)
        print('\r{:02d}'.format(len(observations)), end='')
Example #21
def main():
    args = setup()

    print("Loading model")
    """
    model_class = getattr(torchvision.models, args.model)
    if args.model in ['googlenet', 'inception_v3']:
        model = model_class(pretrained=True, aux_logits=False)
    else:
        model = model_class(pretrained=True)
    model.fc = torch.nn.Linear(model.fc.in_features, 43)
    """
    model = resnet18(num_classes=43)
    model.to(args.device).train()
    if args.parallel:
        model = torch.nn.parallel.DataParallel(model)
    train_loader, val_loader = gtsrb(args.data_dir, batch_size=args.batch_size, workers=args.workers)

    if args.optimizer == 'sgd':
        optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum,
                                    weight_decay=args.l2)
    elif args.optimizer == 'adam':
        optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.l2)
    criterion = torch.nn.CrossEntropyLoss()

    train(model, train_loader, val_loader, optimizer, criterion, args.epochs, args.lr, args.device)

    path = os.path.join(args.root_dir, 'weights', f"{args.model}_{args.data}.pth")
    torch.save(model.state_dict(), path)
Example #22
def obfuscate(args):
    # How market is affected by increasing obfuscation levels
    create_consumers_fn, create_market_fn = setup(args)
    ray.init()

    @ray.remote(num_cpus=1)
    def remote_simulate(consumer_fn, market_fn):
        return run_simulation(consumer_fn, market_fn, use_tqdm=False)

    obfuscation_mus = [i * 0.2 for i in range(40)]
    results = ray.get([
        remote_simulate.remote(
            create_consumers_fn, lambda: create_market_fn(
                obfuscation_dist=create_distribution({
                    'type': 'normal',
                    'mu': obfuscation_mu,
                    'sigma': 0.5,
                    'lower_bound': 0.0
                }))) for obfuscation_mu in obfuscation_mus
    ])
    for result_key in [
            'firm_profits', 'consumer_search_costs', 'consumer_search_count',
            'consumer_prices'
    ]:
        result = [mean(r[result_key]) for r in results]
        plot_result(x=obfuscation_mus,
                    y=result,
                    x_label='obfuscation_level',
                    y_label=result_key)
Example #23
def run(args):
    # Setup
    strategy = setup(args)
    logging.info(args)

    # Transfer learn
    for ds_id in args.ds_ids:
        class_transfer_learn(args, strategy, ds_id)
Example #24
def simulate(args):
    # Default simulation
    results = run_simulation(*setup(args))
    for result_key in results:
        print(result_key)
        print(f"\tmean: {mean(results[result_key])}")
        print(f"\tmax: {max(results[result_key])}")
        print(f"\tmin: {min(results[result_key])}")
Example #25
def setup():
    """Setup for upstream plugin"""
    models.setup()
    utils.setup()
    addbook.setup()
    covers.setup()
    merge_authors.setup()
    
    import data
    data.setup()
    
    # Types for upstream paths
    types.register_type('^/authors/[^/]*$', '/type/author')
    types.register_type('^/books/[^/]*$', '/type/edition')
    types.register_type('^/languages/[^/]*$', '/type/language')

    types.register_type('^/subjects/places/[^/]*$', '/type/place')
    types.register_type('^/subjects/people/[^/]*$', '/type/person')
    types.register_type('^/subjects/[^/]*$', '/type/subject')

    # fix photo/cover url pattern
    ol_code.Author.photo_url_patten = "%s/photo"
    ol_code.Edition.cover_url_patten = "%s/cover"

    # setup template globals
    from openlibrary.i18n import ugettext, ungettext
            
    web.template.Template.globals.update({
        "gettext": ugettext,
        "ugettext": ugettext,
        "_": ugettext,
        "ungettext": ungettext,
        "random": random.Random(),
        "commify": web.commify,
        "group": web.group,
        "storage": web.storage,
        "all": all,
        "any": any,
        "locals": locals
    });
    
    import jsdef
    web.template.STATEMENT_NODES["jsdef"] = jsdef.JSDefNode
    
    setup_jquery_urls()
Example #26
def main():

    exp_name = f'baseline_{now()}'
    device, log, result_dir = setup(exp_name, conf)

    train_df = load_csv(conf.train_csv)
    if conf.npy:
        train_images = np.load(conf.train_images)
    else:
        train_images = pd.read_parquet(conf.train_images)

    test_df = load_csv(conf.test_csv)
    if conf.npy:
        test_images = np.load(conf.test_images)
    else:
        test_images = pd.read_parquet(conf.test_images)

    log.info('done')
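    # conf.fold selects which of the 5 folds this run trains; all other folds are skipped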
    for i in range(5):
        if i != conf.fold:
            continue

        if "resnet" in conf.arch or "resnext" in conf.arch:
            model_ft = ResNet(conf,
                              arch_name=conf.arch,
                              input_size=conf.image_size)
            model_ft.load_state_dict(
                torch.load("result/baseline_2020_03_21_13_01_08/model_0.pkl"))
        elif "densenet" in conf.arch:
            model_ft = DenseNet(conf,
                                arch_name=conf.arch,
                                input_size=conf.image_size)
        elif "efficientnet" in conf.arch:
            model_ft = EfficientNet(conf, arch_name=conf.arch)

        criterion = [
            nn.CrossEntropyLoss(reduction="none"),
            nn.CrossEntropyLoss(reduction="none"),
            nn.CrossEntropyLoss(reduction="none")
        ]
        criterion = [c.to(device) for c in criterion]

        model_ft, val_preds = train_model(train_df,
                                          train_images,
                                          test_df,
                                          test_images,
                                          model_ft,
                                          criterion,
                                          log,
                                          device,
                                          result_dir,
                                          fold=i,
                                          num_epoch=conf.num_epoch)

        torch.save(model_ft.state_dict(), result_dir / f'model_{i}.pkl')
        np.save(result_dir / f'val_preds_{i}.npy', val_preds)
Example #27
def setup():
    """Setup for upstream plugin"""
    models.setup()
    utils.setup()
    addbook.setup()
    covers.setup()
    
    # overwrite ReadableUrlProcessor patterns for upstream
    ReadableUrlProcessor.patterns = [
        (r'/books/OL\d+M', '/type/edition', 'title', 'untitled'),
        (r'/authors/OL\d+A', '/type/author', 'name', 'noname'),
        (r'/works/OL\d+W', '/type/work', 'title', 'untitled')
    ]

    # Types for upstream paths
    types.register_type('^/authors/[^/]*$', '/type/author')
    types.register_type('^/books/[^/]*$', '/type/edition')
    types.register_type('^/languages/[^/]*$', '/type/language')

    types.register_type('^/subjects/places/[^/]*$', '/type/place')
    types.register_type('^/subjects/people/[^/]*$', '/type/person')
    types.register_type('^/subjects/[^/]*$', '/type/subject')

    # fix photo/cover url pattern
    ol_code.Author.photo_url_patten = "%s/photo"
    ol_code.Edition.cover_url_patten = "%s/cover"

    # setup template globals
    from openlibrary.i18n import gettext as _
            
    web.template.Template.globals.update({
        "gettext": _,
        "_": _,
        "random": random.Random(),
        "commify": web.commify,
        "group": web.group,
        "storage": web.storage,
        "all": all,
        "any": any,
        "locals": locals
    });
    
    setup_jquery_urls()
Example #28
    def __init__(self,
                 start_date,
                 end_date,
                 dimension,
                 metric,
                 goal,
                 version,
                 analytics='',
                 filter_data=''):
        self.start_date = start_date
        self.end_date = end_date
        self.dimension = dimension
        self.metric = metric
        self.goal = goal
        self.version = version
        self.filter_data = filter_data
        self.analytics = self.setup_auth()
        self.setup_plot()
        u.setup()
Example #29
def method4(inputTuple):
    queryFirst = inputTuple[0]
    foreText = inputTuple[1]
    searchList = inputTuple[2]
    blacklist = ["is","it","to","for","the","and","but","there","their","they're","she","his","her","mine","this","then","that"]
    i = 'for'
    j = 0
    word = i
    URL = 'http://rhymebrain.com/talk?function=getRhymes&word=%s' %concat(queryFirst)
    u = urllib2.urlopen(URL)
    q = u.read()
    u.close()
    text = json.loads(q)
    index = 0
    score = 0
    stats = (index,score)
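    # Keep the (index, syllable count) of the rhyme with the most syllables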
    for i in text:
        if i["syllables"] > stats[1]:
            stats = (index,i["syllables"])
        index += 1
    try:
        rhyme = text[stats[0]]['word']
    except:
        return ("Error","Error")
    foreText2 = setup(rhyme)
    searchList2 = foreText2.split()
    tuple = (rhyme,foreText2,searchList2)
    theoryTuple = method1(tuple)
    if theoryTuple == ("Error","Error") or rhyme.lower() == queryFirst.lower():
        return ("Error","Error")
    speech = "%s rhymes with %s . %s" %(capitalize(queryFirst),rhyme,theoryTuple[0])
    text = '"%s" rhymes with "%s." %s###foreText:%s' %(capitalize(queryFirst),rhyme,theoryTuple[1],setup(rhyme))
    outTuple = (rhyme,foreText,searchList)
    if method3(outTuple) != ("Error","Error"):
        if sentence(method3(outTuple)[1])[0][-(len(queryFirst)+1):-1].lower() != queryFirst.lower():
            speech1 = method3(outTuple)[0]
            speech = "%s rhymes with %s . %s" %(capitalize(queryFirst),rhyme,speech1)
            text1 = method3(outTuple)[1]
            sentences = method3(outTuple)[1].split('in the New York Times comes up as "')[1]
            sentences = sentences.split('."')[0]
            text = '"%s" rhymes with "%s." %s###foreText:%s' %(capitalize(queryFirst),rhyme,text1,sentences)
            senText = sentence(text.split('###')[0])
            senTextLast = senText[-2]
            senTextLastList = senTextLast.split('"')
            rhymeUlt = senTextLastList[1]
            twoParts = senTextLast.split(rhymeUlt)
            onePart = insert(twoParts,capitalize(queryFirst),1)
            onePartString = merge(onePart)
            senTextInsert = senText[:-2]
            senTextInsert.append(onePartString)
            senTextInsert = merge(senTextInsert)
            senTextInsert = senTextInsert + ("###")
            senTextInsert = senTextInsert + sentence(text.split('###')[1])[0]
            text = merge(senTextInsert)
    return (speech,text)
Example #30
def main():
    """
    x .. input features
    y .. teacher labels (0/1)
    Frames are classified independently (no LSTM), so the batch size does not have to be 1
    """
    df_list = setup()

    df = pd.concat(df_list[:90])
    x = df.iloc[:, -128:].values

    print(x.shape)
    y = np.clip(1.0 - (df['utter_A'] + df['utter_B']), 0, 1)

    df = pd.concat(df_list[90:])
    x_val = df.iloc[:, -128:].values
    print(x_val.shape)
    y_val = np.clip(1.0 - (df['utter_A'] + df['utter_B']), 0, 1)

    net = TimeActionPredict()
    print('Model :', net.__class__.__name__)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    criterion = nn.CrossEntropyLoss(weight=torch.tensor([1.0, 1.0]).to(device))
    optimizer = optim.Adam(net.parameters(), lr=0.001)

    for name, param in net.named_parameters():
        if 'fc' in name or 'lstm' in name:
            param.requires_grad = True
            print("勾配計算あり。学習する:", name)
        else:
            param.requires_grad = False

    train_dataloader = data.DataLoader(list(zip(x, y)),
                                       batch_size=1,
                                       shuffle=False)

    test_dataloader = data.DataLoader(list(zip(x_val, y_val)),
                                      batch_size=1,
                                      shuffle=False)

    dataloaders_dict = {
        "train": train_dataloader,
        "val": test_dataloader,
        "test": test_dataloader
    }

    train(net=net,
          dataloaders_dict=dataloaders_dict,
          criterion=criterion,
          optimizer=optimizer,
          num_epochs=50,
          output='./lstm_model/',
          resume=True)
Example #31
def main():
    print "Starting main()\n"
    u.setup()
    u.calibrate()
    a.only_first_three()
    # m.backwards(1300)
    m.turn_right()
    m.backwards(4500)
    u.sd()


    a.get_low_poms_cheeky()
    a.get_frisbee()
    a.get_mid_poms()
    a.get_high_poms_cheeky()
    a.get_farther_high_poms() 
    a.get_farther_mid_poms()
    a.get_farther_low_poms()
    print "Finished main\n"
    u.shutdown(86)
Example #32
def optimize():


    phase = "passive"
    material_model = "holzapfel_ogden"
    
    active_model = "active_strain"
    fiber_angle = (60,-60)

    # (strain, volume, regularization)
    weight = (0.0,1.0,0.0)
    ndiv = 1
    control_regions = [2]
    strain_regions = [2]
    eps_strain = 0.0
    eps_vol = 0.0
    space = "R_0"
    unload = True
    pressures = [0, 0.6, 0.8]
    h5name = "test_unload_geometry_idx_0.h5"
    approx = "interpolate"
    geometry_index = "0"
    
    params, strains, vols, ap_params, p_lv, w, u, p, f_ex, basis, u_img = \
        setup(phase=phase,
              material_model=material_model,
              active_model=active_model,
              fiber_angle=fiber_angle,
              weight=weight,
              ndiv=ndiv,
              control_regions=control_regions,
              strain_regions=strain_regions,
              eps_vol=eps_vol,
              eps_strain=eps_strain,
              pressures=pressures,
              space=space, unload=unload,
              h5name=h5name, approx=approx,
              geometry_index=geometry_index,
              isotropic=True, restart=True)
    

    patient = set_patient_attributes(params, pressures, vols, strains, True)

    import numpy as np
    volume = np.transpose(vols)[1]

    ap_params["Unloading_parameters"]["estimate_initial_guess"] = False
    from pulse_adjoint.run_optimization import run_unloaded_optimization
    # from IPython import embed; embed()
    ap_params['Unloading_parameters']['maxiter'] = 2
    ap_params['Unloading_parameters']['unload_options']['maxiter'] = 2
    ap_params["Optimization_parameters"]['passive_maxiter'] = 2
    run_unloaded_optimization(ap_params, patient)
Example #33
def setup_inversion():
    model_path = sys.argv[1]
    config_path = sys.argv[2]
    cfg, G, lidar, device = utils.setup(model_path,
                                        config_path,
                                        ema=True,
                                        fix_noise=True)
    if osp.exists(cfg.dataset.root):
        dataset = define_dataset(cfg.dataset, phase="test")
    else:
        dataset = None
    return cfg, G, lidar, device, dataset
Example #34
def main():

    exp_name = f'baseline_{now()}'
    device, log, result_dir = setup(exp_name, conf)

    train_df = load_csv(conf.train_csv)
    if conf.npy:
        train_images = np.load(conf.train_images)
    else:
        train_images = pd.read_parquet(conf.train_images)

    train_df["gr"] = 0
    train_df["cd"] = 0
    train_df["vd"] = 0
    train_df["image_mean"] = 0

    models = [f"se_resnext50_f{i}.pkl" for i in range(5)]

    preds = np.zeros((len(train_df), conf.gr_size + conf.vd_size + conf.cd_size))
    image_stats = np.zeros((len(train_df), 2))

    log.info('done')
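    # Build out-of-fold predictions: each fold's model predicts on its own validation split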
    for i in range(5):

        model = ResNet(conf, arch_name=conf.arch,
                          input_size=conf.image_size)
        model.load_state_dict(torch.load(models[i]))
        model.to(device)

        ds = val_split(train_df, train_images, fold=i)
        _, val_ds, _, val_images = ds['train'], ds['val'], ds['train_images'], ds['val_images']

        test_preds = predict(model, val_ds, val_images, valid_transform,
                             device)

        print(test_preds.shape)
        te_ind = ds['te_ind']
        preds[te_ind] += test_preds
        image_stats[te_ind, 0] = val_images.mean((1, 2))
        image_stats[te_ind, 1] = val_images.std((1, 2))

    preds = np.concatenate([preds, image_stats], axis=1)

    for t in ["grapheme_root", "vowel_diacritic", "consonant_diacritic"]:
        rf = RandomForestClassifier(n_jobs=16)
        # train = xgb.DMatrix(preds, label=train_df[t])
        # params = {"max_depth": 4, "nthread": 16, "objective": "multi:softmax",
        #           "eval_metric": ["merror", "mlogloss"], "num_class": conf.gr_size}
        # xgb.cv(params, train, num_boost_round=1000, nfold=5, seed=conf.seed,
        #        early_stopping_rounds=40, verbose_eval=10)
        rf.fit(preds, train_df[t])
        with open(f"{t}_rf2.pkl", "wb") as f:
            joblib.dump(rf, f)
Example #35
File: ggd.py Project: arq5x/ggd
def install(args):
    """
    Install a dataset based on a GGD recipe
    """
    recipe = args.recipe
    utils.setup(args)

    # get the raw YAML string contents of the recipe
    recipe_dict = _get_recipe(recipe, args.cookbook)

    if recipe_dict is None:
        sys.stderr.write("recipe not found exiting.\n")
        sys.exit(2)

    ret = _run_recipe(args, recipe_dict)
    if ret == 0:
        # TO DO
        # register_installed_recipe(args, recipe_dict)
        sys.stderr.write("installed " + args.recipe + "\n")
    else:
        sys.stderr.write("failure installing " + args.recipe + "\n")
    sys.exit(ret)
Example #36
def method3(inputTuple):
    queryFirst = inputTuple[0]
    foreText = inputTuple[1]
    searchList = inputTuple[2]
    blacklist = ["is","it","to","for","the","and","but","there","their","they're","she","his","her","mine","this","then","that"]
    i = 'for'
    j = 0
    word = i
    URL = 'http://rhymebrain.com/talk?function=getRhymes&word=%s' %concat(queryFirst)
    u = urllib2.urlopen(URL)
    q = u.read()
    u.close()
    text = json.loads(q)
    index = 0
    score = 0
    stats = (index,score)
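    # Keep the (index, frequency) of the most frequent rhyme returned by RhymeBrain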
    for i in text:
        if i["freq"] > stats[1]:
            stats = (index,i["freq"])
        index += 1
    try:
        rhyme = text[stats[0]]['word']
    except:
        return ("Error","Error")
    foreText2 = setup(rhyme)
    searchList2 = foreText2.split()
    tuple = (rhyme,foreText2,searchList2)
    theoryTuple = method1(tuple)
    if theoryTuple == ("Error","Error") or rhyme.lower() == queryFirst.lower():
        return ("Error","Error")
    speech = "%s rhymes with %s . %s" %(capitalize(queryFirst),rhyme,theoryTuple[0])
    text = '"%s" rhymes with "%s." %s###foreText:%s' %(capitalize(queryFirst),rhyme,theoryTuple[1],setup(rhyme))
    outTuple = (rhyme,foreText,searchList)
    #if method4(outTuple) != ("Error","Error"):
    #    if method4(outTuple)[1][-(len(queryFirst)+2):-2].lower() != queryFirst.lower():
    #        speech1 = method4(outTuple)[0]
    #        speech = "%s rhymes with %s . %s" %(capitalize(queryFirst),rhyme,speech1)
    #        text1 = method4(outTuple)[1]
    #        text = '"%s" rhymes with "%s." %s###foreText:%s' %(capitalize(queryFirst),rhyme,text1,setup(rhyme))
    return (speech,text)
Example #37
def setup_pHdependentDescriptors(_descriptorDict):
    """Set up for calculation of pH dependent descriptors."""
    pH_vals = DRP.models.NumRxnDescriptorValue.objects.filter(descriptor__heading='reaction_pH', reaction__performedreaction__valid=True).exclude(
        value=None).order_by('value').values_list('value', flat=True).distinct()
    for descriptor, d in _pHDependentDescriptors.items():
        for pH in pH_vals:
            pH_string = str(pH).replace('.', '_')  # R compatibility
            d_copy = d.copy()
            d_copy['name'] += ' at pH {}'.format(pH_string)
            _descriptorDict[descriptor + '_pH{}'.format(pH_string)] = d_copy

    descriptorDict = setup(_descriptorDict)

    _cxcalcpHCommandStems = {
        'avgpol_pH{}': 'avgpol -H {}',
        'molpol_pH{}': 'molpol -H {}',
        'vanderwaals_pH{}': 'vdwsa -H {}',
        'asa_pH{}': 'molecularsurfacearea -t ASA -H {}',
        'asa+_pH{}': 'molecularsurfacearea -t ASA+ -H {}',
        'asa-_pH{}': 'molecularsurfacearea -t ASA- -H {}',
        'asa_hydrophobic_pH{}': 'molecularsurfacearea -t ASA_H -H {}',
        'asa_polar_pH{}': 'molecularsurfacearea -t ASA_P -H {}',
        'hbda_acc_pH{}': 'acceptorcount -H {}',
        'hbda_don_pH{}': 'donorcount -H {}',
        'polar_surface_area_pH{}': 'polarsurfacearea -H {}',
    }

    for key, command in _cxcalcpHCommandStems.items():
        for pH in pH_vals:
            pH_string = str(pH).replace('.', '_')  # R compatibility
            cxcalcCommands[key.format(pH_string)] = command.format(pH)

    if len(cxcalcCommands) != len(_descriptorDict):
        raise RuntimeError(
            "Need the same number of cxcalc commands as descriptors being calculated")

    return descriptorDict
Example #38
#!/usr/bin/env python3
from utils import filename, save2pdf, setup
from utils.plots_helper import sensors 
from matplotlib.backends.backend_pdf import PdfPages

make, fname = filename("sensor-mix-plots.pdf")
dfile = "../datasets/real/intel/sensors-1000-dirty.txt"

# e, p, t, y, x
args = [
    (0.7,2,0.05,0,1),
    (0.7,2,0.05,0,3)]

pdf = PdfPages(fname)
for (e,p,t,y,x) in args:
    title = "Outliers in Sensor Data\n"+str(p)+" Gaussians,$\\theta$=" + str(t)
    ofile = "../results/sensors_dirty_stat" + str(e) + "_mixture" + str(p) + "_" + str(t) + ".out"
    setup()
    sensors(title,x,y,dfile,ofile)
    save2pdf(pdf)
pdf.close()
Example #39
  def setUp(self):
    if self.vttop is None:
      raise EnvironmentError("VTTOP not defined")
    if self.vtroot is None:
      raise EnvironmentError("VTROOT not defined")
    try:
      os.makedirs(utils.tmp_root)
    except OSError:
      pass

    utils.setup()

    framework.execute('go build', verbose=utils.options.verbose, cwd=self.vttop+'/go/cmd/vtocc')
    framework.execute('go build', verbose=utils.options.verbose, cwd=self.vttop+'/go/cmd/mysqlctl')

    # start mysql
    res = subprocess.call([
        self.vtroot+"/bin/mysqlctl",
        "-tablet-uid",  self.tabletuid,
        "-port", str(self.vtoccport),
        "-mysql-port", str(self.mysqlport),
        "init"
        ])
    if res != 0:
      raise EnvironmentError("Cannot start mysql")
    res = subprocess.call([
        "mysql",
        "-S",  self.mysqldir+"/mysql.sock",
        "-u", "vt_dba",
        "-e", "create database vt_test_keyspace ; set global read_only = off"])
    if res != 0:
      raise Exception("Cannot create vt_test_keyspace database")
    dbconfig = self.mysqldir+"/dbconf.json"
    with open(dbconfig, 'w') as f:
      conf = {
          'charset': 'utf8',
          'dbname': 'vt_test_keyspace',
          'host': 'localhost',
          'unix_socket': self.mysqldir+"/mysql.sock",
          'uname': 'vt_dba',   # use vt_dba as some tests depend on 'drop'
          'keyspace': 'test_keyspace',
          'shard' : '0',
          }
      json.dump(conf, f)

    self.mysql_conn = self.mysql_connect()
    mcu = self.mysql_conn.cursor()
    self.clean_sqls = []
    self.init_sqls = []
    clean_mode = False
    with open(os.path.join(self.vttop, "test", "test_data", "test_schema.sql")) as f:
      for line in f:
        line = line.rstrip()
        if line == "# clean":
          clean_mode = True
        if line=='' or line.startswith("#"):
          continue
        if clean_mode:
          self.clean_sqls.append(line)
        else:
          self.init_sqls.append(line)
    try:
      for line in self.init_sqls:
        mcu.execute(line, {})
    finally:
      mcu.close()

    customrules = '/tmp/customrules.json'
    self.create_customrules(customrules)
    schema_override = '/tmp/schema_override.json'
    self.create_schema_override(schema_override)

    occ_args = [
      self.vtroot+"/bin/vtocc",
      "-port", "9461",
      "-dbconfig", dbconfig,
      "-customrules", customrules,
      "-schema-override", schema_override,
    ]
    if utils.options.memcache:
      memcache = self.mysqldir+"/memcache.sock"
      config = self.mysqldir+"/config.json"
      with open(config, 'w') as f:
        json.dump({"RowCache": ["memcached", "-s", memcache]}, f)
      occ_args.extend(["-queryserver-config-file", config])

    self.vtstderr = open("/tmp/vtocc_stderr.log", "a+")
    self.vtocc = subprocess.Popen(occ_args, stderr=self.vtstderr)
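    # Poll for up to 30 seconds until vtocc accepts connections, then attach the log tailers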
    for i in range(30):
      try:
        self.conn = self.connect()
        self.txlogger = subprocess.Popen(['curl', '-s', '-N', 'http://localhost:9461/debug/txlog'], stdout=open('/tmp/vtocc_txlog.log', 'w'))
        self.txlog = framework.Tailer(open('/tmp/vtocc_txlog.log', 'r'))

        def flush():
          utils.run(['curl', '-s', '-N', 'http://localhost:9461/debug/flushlogs'], trap_output=True)

        self.log = framework.Tailer(open('/tmp/vtocc.INFO'), flush=flush)
        utils.run_bg(['curl', '-s', '-N', 'http://localhost:9461/debug/querylog?full=true'], stdout=open('/tmp/vtocc_streamlog_9461.log', 'w'))
        self.querylog = framework.Tailer(open('/tmp/vtocc_streamlog_9461.log'), sleep=0.1)
        return
      except dbexceptions.OperationalError:
        if i == 29:
          raise
        time.sleep(1)
Example #40
  def setUp(self):
    utils.zk_setup()
    utils.setup()
    if self.vttop is None:
      raise EnvironmentError("VTTOP not defined")
    if self.vtroot is None:
      raise EnvironmentError("VTROOT not defined")

    framework.execute('go build', verbose=utils.options.verbose, cwd=self.vttop+'/go/cmd/mysqlctl')

    utils.wait_procs([self.tablet.init_mysql()])
    self.tablet.mquery("", ["create database vt_test_keyspace", "set global read_only = off"])

    self.mysql_conn, mcu = self.tablet.connect('vt_test_keyspace')
    self.clean_sqls = []
    self.init_sqls = []
    clean_mode = False
    with open(os.path.join(self.vttop, "test", "test_data", "test_schema.sql")) as f:
      for line in f:
        line = line.rstrip()
        if line == "# clean":
          clean_mode = True
        if line=='' or line.startswith("#"):
          continue
        if clean_mode:
          self.clean_sqls.append(line)
        else:
          self.init_sqls.append(line)
    try:
      for line in self.init_sqls:
        mcu.execute(line, {})
    finally:
      mcu.close()

    utils.run_vtctl('CreateKeyspace -force /zk/global/vt/keyspaces/test_keyspace')
    self.tablet.init_tablet('master', 'test_keyspace', '0')

    customrules = '/tmp/customrules.json'
    self.create_customrules(customrules)
    schema_override = '/tmp/schema_override.json'
    self.create_schema_override(schema_override)
    if utils.options.memcache:
      self.tablet.start_vttablet(memcache=True, customrules=customrules, schema_override=schema_override)
    else:
      self.tablet.start_vttablet(customrules=customrules, schema_override=schema_override)

    # FIXME(szopa): This is necessary here only because of a bug that
    # makes the qs reload its config only after an action.
    utils.run_vtctl('Ping ' + self.tablet.zk_tablet_path)

    for i in range(30):
      try:
        self.conn = self.connect()
        self.txlogger = subprocess.Popen(['curl', '-s', '-N', 'http://localhost:9461/debug/txlog'], stdout=open('/tmp/vtocc_txlog.log', 'w'))
        self.txlog = framework.Tailer(open('/tmp/vtocc_txlog.log'), flush=self.tablet.flush)
        self.log = framework.Tailer(open(os.path.join(self.tablet.tablet_dir, 'vttablet.INFO')), flush=self.tablet.flush)
        querylog_file = '/tmp/vtocc_streamlog_%s.log' % self.tablet.port
        utils.run_bg(['curl', '-s', '-N', 'http://localhost:9461/debug/querylog?full=true'], stdout=open(querylog_file, 'w'))
        time.sleep(1)
        self.querylog = framework.Tailer(open(querylog_file), sleep=0.1)

        return
      except dbexceptions.OperationalError:
        if i == 29:
          raise
        time.sleep(1)
Example #41
            else:
                print benchmark, 'price not retrieved, page not saved'

        else:
            # Timed out
            print benchmark, "timed out"
            pass

    return None


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Pricebot -- update wiki page")
    parser.add_argument('-q', '--quiet', action='store_true',
                        default=False, help='Quiet non-error output')
    args = parser.parse_args()

    config = config(quiet=args.quiet)

    # Now do the work!
    url = config.setting['pricebot']['wiki_url']
    httpuser = config.setting['pricebot']['username']
    httppass = config.setting['pricebot']['password']

    site = setup(url, httpuser, httppass, config)

    # Then check if the bot is disabled, and act accordingly
    if bot_status(site) == 1:
        set_exchange_rates(site)
        set_crude_prices(site)
Example #42
"""An example molecular descriptor plugin to demonstrate the 'shape' that the API requires."""
# I wanted to name this module rdkit, but then we get name conflicts...
# lol python
from utils import setup
import rdkit.Chem
import DRP

calculatorSoftware = 'DRP_rdkit'

_descriptorDict = {
    'mw': {'type': 'num', 'name': 'Molecular Weight', 'calculatorSoftware': calculatorSoftware, 'calculatorSoftwareVersion': '0_02', 'maximum': None, 'minimum': 0},
}

descriptorDict = setup(_descriptorDict)

pt = rdkit.Chem.GetPeriodicTable()


def calculate_many(compound_set, verbose=False, whitelist=None):
    """Calculate in bulk."""
    for i, compound in enumerate(compound_set):
        if verbose:
            print "{}; Compound {} ({}/{})".format(compound, compound.pk, i + 1, len(compound_set))
        calculate(compound, verbose=verbose, whitelist=whitelist)


def calculate(compound, verbose=False, whitelist=None):
    """Calculate the descriptors from this plugin for a compound."""
    heading = 'mw'
    if whitelist is None or heading in whitelist:
        mw = sum(pt.GetAtomicWeight(pt.GetAtomicNumber(str(element))) * float(
Example #43
def confirm():
    if request.method == 'GET':
        return render_template("index.html")

##Setup##
    queryFirst = request.form["search"]
    foreText = setup(queryFirst)
    if foreText == "Error":
        errorText = '"%s" is non-existent' %capitalize(queryFirst)
        return render_template('confirm.html', articleIntro="", text="That doesnt exist, and therefore cannot be used to confirm the illuminati .", printText='"'+capitalize(queryFirst)+"\" doesn't exist, and therefore cannot be used to confirm the illuminati.", store=queryFirst, headline=errorText, bgcolor="#9e9e9e", pic="static/img/trippyBanner.jpg")
    searchList = foreText.split()
##Declaration##
    terms = (queryFirst,foreText,searchList)
    color = "#9e9e9e"
    template = 'confirm.html'
    article = "The article is: "
    titlePic = "static/img/trippyBanner.jpg"
    xtra = ""
##Method Priority List##
    methodPriority = [redFlag(terms),colorFlag(terms),method4(terms),method3(terms),method1(terms)] 
##Start the confirming##
    i = 0
    exit = 0
    if foreText == "ERROR":
        foreText = '"%s" is non-existent' %capitalize(queryFirst)
        output = '"%s" doesn\'t exist, and therefore cannot be used to confirm the illuminati.' %queryFirst
        speech = 'That doesn\'t exist, and therefore cannot be used to confirm the illuminati .'
        article=""
        exit = 1
    while i <= len(methodPriority) - 1 and exit == 0:
        text = methodPriority[i]
        if text[0] != "Error":
            speech = text[0]
            output = text[1]
            exit = 1
        if i == len(methodPriority) - 1 and text[0] == "Error":
            speech = '%s cannot be used to confirm the illuminati .' %queryFirst
            output = '"%s" cannot be used to confirm the illuminati.' %capitalize(queryFirst)
        i += 1
    print output
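    # The text before the first '###' is the display copy; later '###key:value' chunks override template variables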
    out = output.split("###")[0]
    commands = output.split("###")[1:]
    variableSet = []
    if len(commands) >= 1:
        for i in commands:
            a = ''
            b = ''
            j = 0
            while j <= len(i) - 1 and b == '':
                q = i[j]
                a = a + q
                if q == ":":
                    a = a[:-1]
                    b = i[j+1:]
                j = j + 1
            variableSet.append([a,b])
    if len(variableSet) >= 1:
        i = 0
        while i <= len(variableSet) - 1:
            j = variableSet[i]
            if len(j) == 1:
                j.append("")
            if j[0] == "foreText":
                foreText = j[1]
            if j[0] == "color":
                color = j[1]
            if j[0] == "article":
                article = j[1]
            if j[0] == "speech":
                speech = j[1]
            if j[0] == "out":
                out = j[1]
            if j[0] == "queryFirst":
                queryFirst = j[1]
            if j[0] == "template":
                template = j[1]
            if j[0] == "titlePic":
                titlePic = j[1]
            if j[0] == "xtra":
                xtra = j[1]
            i += 1
    try:
        return render_template(template, articleIntro=article, text=speech, printText=out, store=queryFirst, headline=foreText, bgcolor=color, pic=titlePic, extraHTML=xtra)
    except:
        return render_template('index.html', articleIntro=article, text=speech, printText=out, store=queryFirst, headline=foreText, bgcolor=color, pic=titlePic, extraHTML=xtra)
Example #44
def setup():
	return utils.setup("HP5359A", "HP5359A.ROM", 1)
Example #45
File: drp.py Project: zhaojhao/DRP
def make_dict():
    """Make a dictionary of descriptors based on known patterns."""
    _reaction_pH_Descriptors = {}
    weightings = ('molarity', 'count')
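    # For every compound role, define aggregate descriptors weighted by molarity and by count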
    for compoundRole in DRP.models.CompoundRole.objects.all():
        for w in weightings:
            _descriptorDict['{}_amount_{}'.format(compoundRole.label, w)] = {
                'type': 'num',
                'name': 'Amount of compounds in the reaction belonging in the role "{}" weighted by {}'.format(compoundRole.label, w),
                'calculatorSoftware': calculatorSoftware,
                'calculatorSoftwareVersion': '0_02',
                'maximum': None,
                'minimum': 0,
            }
        for descriptor in DRP.models.CatMolDescriptor.objects.all():
            for w in weightings:
                for permValue in descriptor.permittedValues.all():
                    _descriptorDict['{}_{}_{}_{}'.format(compoundRole.label, descriptor.csvHeader, permValue.value, w)] = {
                        'type': 'num',
                        'name': 'Amount of reactants in category {} for descriptor "{}" weighted by reactant {} in compound role {}.'.format(
                                permValue.value, descriptor.name, w, compoundRole.label),
                        'calculatorSoftware': calculatorSoftware,
                        'calculatorSoftwareVersion': '0_02',
                        'maximum': None,
                        'minimum': 0,
                    }
        for descriptor in DRP.models.OrdMolDescriptor.objects.all():
            for w in weightings:
                # because python...
                for i in range(descriptor.minimum, descriptor.maximum + 1):
                    _descriptorDict['{}_{}_{}_{}'.format(compoundRole.label, descriptor.csvHeader, i, w)] = {
                        'type': 'num',
                        'name': 'Amount of reactants with value {} for descriptor "{}" weighted by reactant {} in compound role {}.'.format(
                            i, descriptor.name, w, compoundRole.label),
                        'calculatorSoftware': calculatorSoftware,
                        'calculatorSoftwareVersion': '0_02',
                        'maximum': None,
                        'minimum': 0,
                    }
        for descriptor in DRP.models.BoolMolDescriptor.objects.all():
            for w in weightings:
                for value in ('True', 'False'):
                    _descriptorDict['{}_{}_{}_{}'.format(compoundRole.label, descriptor.csvHeader, value, w)] = {
                        'type': 'num',
                        'name': 'Amount of reactants with value {} for descriptor "{}" weighted by reactant {} in compound role {}.'.format(
                            value, descriptor.name, w, compoundRole.label),
                        'calculatorSoftware': calculatorSoftware,
                        'calculatorSoftwareVersion': '0_02',
                        'maximum': None,
                        'minimum': 0,
                    }

            _descriptorDict['{}_{}_any'.format(compoundRole.label, descriptor.csvHeader)] = {
                'type': 'bool',
                'name': 'Whether any reactants have value True for descriptor "{}" in compound role {}.'.format(
                        descriptor.name, compoundRole.label),
                'calculatorSoftware': calculatorSoftware,
                'calculatorSoftwareVersion': '1_5',
            }

        for descriptor in DRP.models.NumMolDescriptor.objects.all():
            _descriptorDict['{}_{}_{}'.format(compoundRole.label, descriptor.csvHeader, 'Max')] = {
                'type': 'num',
                'name': 'Maximum value for {} aggregated across compounds in role "{}"'.format(descriptor.name, compoundRole.label),
                'calculatorSoftware': calculatorSoftware,
                'calculatorSoftwareVersion': '0_02',
                'maximum': descriptor.maximum,
                'minimum': descriptor.minimum,
            }
            _descriptorDict['{}_{}_{}'.format(compoundRole.label, descriptor.csvHeader, 'Range')] = {
                'type': 'num',
                'name': 'Range for {} aggregated across compounds in role "{}"'.format(descriptor.name, compoundRole.label),
                'calculatorSoftware': calculatorSoftware,
                'calculatorSoftwareVersion': '0_02',
                'maximum': (descriptor.maximum - descriptor.minimum) if (descriptor.maximum is not None and descriptor.minimum is not None) else None,
                'minimum': 0,
            }
            for w in weightings:
                _descriptorDict['{}_{}_{}_{}'.format(compoundRole.label, descriptor.csvHeader, 'gmean', w)] = {
                    'type': 'num',
                    'name': 'Geometric Mean for {} aggregated across compounds in role "{}" normalised by {}'.format(descriptor.name, compoundRole.label, w),
                    'calculatorSoftware': calculatorSoftware,
                    'calculatorSoftwareVersion': '0_02',
                    'maximum': None,
                    'minimum': 0,
                }

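        # pH-dependent descriptors: the same Max/Range/geometric-mean aggregates, evaluated at the reaction pH.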
        for heading, d in _pHDependentDescriptors.items():
            _reaction_pH_Descriptors['{}_{}_pHreaction_{}_{}_{}'.format(compoundRole.label, heading, d['calculatorSoftware'], d['calculatorSoftwareVersion'], 'Max')] = {
                'type': 'num',
                'name': 'Maximum value for {} aggregated across compounds in role "{}"'.format(d['name'] + ' at reaction pH', compoundRole.label),
                'calculatorSoftware': calculatorSoftware,
                'calculatorSoftwareVersion': '1_5',
                'maximum': d['maximum'],
                'minimum': d['minimum'],
            }
            _reaction_pH_Descriptors['{}_{}_pHreaction_{}_{}_{}'.format(compoundRole.label, heading, d['calculatorSoftware'], d['calculatorSoftwareVersion'], 'Range')] = {
                'type': 'num',
                'name': 'Range for {} aggregated across compounds in role "{}"'.format(d['name'] + ' at reaction pH', compoundRole.label),
                'calculatorSoftware': calculatorSoftware,
                'calculatorSoftwareVersion': '1_5',
                'maximum': d['maximum'] - d['minimum'] if (d['maximum'] is not None and d['minimum'] is not None) else None,
                'minimum': 0,
            }
            for w in weightings:
                _reaction_pH_Descriptors['{}_{}_pHreaction_{}_{}_{}_{}'.format(compoundRole.label, heading, d['calculatorSoftware'], d['calculatorSoftwareVersion'], 'gmean', w)] = {
                    'type': 'num',
                    'name': 'Geometric Mean for {} aggregated across compounds in role "{}" normalised by {}'.format(d['name'] + ' at reaction pH', compoundRole.label, w),
                    'calculatorSoftware': calculatorSoftware,
                    'calculatorSoftwareVersion': '1_5',
                    'maximum': None,
                    'minimum': 0,
                }

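    # Merge the reaction-pH descriptors into the main dictionary and register everything via setup().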
    _descriptorDict.update(_reaction_pH_Descriptors)

    descriptorDict = setup(_descriptorDict)
    return descriptorDict, _reaction_pH_Descriptors
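A minimal usage sketch (an illustration only, not part of the snippet above; the name make_dict and the exact shape of what setup() returns are assumptions, following the older DRP example further below):

# Hypothetical usage: build the descriptor dictionaries once, then inspect the
# pH-dependent subset that is returned alongside the full mapping.
descriptorDict, reaction_pH_descriptors = make_dict()
for heading in reaction_pH_descriptors:
    print(heading)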
Esempio n. 46
0
def setup():
	return utils.setup("HP5370A", "HP5370A.ROM", -1)
Esempio n. 47
0
def try_piplsearch(person):
	dic = {}
	results = piplsearch.pipl_search({'email': person.get('email')})
	# if Pipl found some LinkedIn profiles, convert them to the standard form
	for socmedia in [socmedia for socmedia in results if socmedia['site_name'] == 'LinkedIn']:
		parsed_profile = parse_a_li_profile(socmedia.get('url'), person.get('name'))
		dic = {'url': socmedia.get('url'), 'score': 99, 'parsed_profile': parsed_profile}
		person['li_matches'].append(dic)
	# returns the most recently converted match (empty dict if none were found)
	return dic



if __name__ == '__main__':
	reload(sys)
	sys.setdefaultencoding('utf-8')
	devs, log = utils.setup()
	
	
	# Use Google to find potential LinkedIn matches
	for dev in devs:
		try:
			print '\rGoogling for matches for %s...' % dev.get('name')
			dev['li_matches'] = google_for_li_matches(dev)
		except Exception:
			print "%s occurred while processing: %s" % (sys.exc_info()[0].__name__, dev['name'])
			continue
	utils.save_as_json(devs, 'googlesearchresults')
	print "Done.\n"
	
	
	# Compare the LinkedIn profiles to the GitHub profile, score them and sort them. Return best at index[0]
Esempio n. 48
0
def setup():
	return utils.setup("HP5370B", "HP5370B.ROM", -1)
Esempio n. 49
0
File: drp.py Project: nihaoCC/DRP
def make_dict():
    weightings = ('molarity', 'count')
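    # Every weighted descriptor below is emitted twice: once weighted by molarity, once by compound count.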
    for compoundRole in DRP.models.CompoundRole.objects.all():
        for w in weightings:
            _descriptorDict['{}_amount_{}'.format(compoundRole.label, w)] = {
                    'type': 'num',
                    'name': 'Amount of compounds in the reaction belonging in the role "{}" weighted by {}'.format(compoundRole.label, w),
                    'calculatorSoftware': 'DRP',
                    'calculatorSoftwareVersion': '0.02',
                    'maximum':None,
                    'minimum': 0
                }
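        # Categorical descriptors: one weighted-amount column per permitted category value.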
        for descriptor in DRP.models.CatMolDescriptor.objects.all():
            for w in weightings:
                for permValue in descriptor.permittedValues.all():
                    _descriptorDict['{}_{}_{}_{}'.format(compoundRole.label, descriptor.csvHeader, permValue.value, w)] = {
                        'type': 'num',
                        'name': 'Amount of reactants in category {} for descriptor "{}" weighted by reactant {} in compound role {}.'.format(
                                permValue.value, descriptor.name, w, compoundRole.label),
                        'calculatorSoftware': 'DRP',
                        'calculatorSoftwareVersion': '0.02',
                        'maximum': None,
                        'minimum': None
                        }
        for descriptor in DRP.models.OrdMolDescriptor.objects.all():
            for w in weightings:
                for i in range(descriptor.minimum, descriptor.maximum + 1):  # +1 because range() excludes its upper bound
                    _descriptorDict['{}_{}_{}_{}'.format(compoundRole.label, descriptor.csvHeader, i, w)] = {
                            'type': 'num',
                            'name': 'Amount of reactants with value {} for descriptor "{}" weighted by reactant {} in compound role {}.'.format(
                                    i, descriptor.name, w, compoundRole.label),
                            'calculatorSoftware': 'DRP',
                            'calculatorSoftwareVersion': '0.02',
                            'maximum': None,
                            'minimum': None
                        }
        # Boolean descriptors: weighted-amount columns for the True and False values.
        for descriptor in DRP.models.BoolMolDescriptor.objects.all():
            for w in weightings:
                for value in ('True', 'False'):
                    _descriptorDict['{}_{}_{}_{}'.format(compoundRole.label, descriptor.csvHeader, value, w)] = {
                            'type': 'num',
                            'name': 'Amount of reactants with value {} for descriptor "{}" weighted by reactant {} in compound role {}.'.format(
                                    value, descriptor.name, w, compoundRole.label),
                            'calculatorSoftware': 'DRP',
                            'calculatorSoftwareVersion': '0.02',
                            'maximum': None,
                            'minimum': None,
                        }
        # Numeric descriptors: Max and Range aggregates, plus a weighted geometric mean.
        for descriptor in DRP.models.NumMolDescriptor.objects.all():
            _descriptorDict['{}_{}_{}'.format(compoundRole.label, descriptor.csvHeader, 'Max')] = {
                'type': 'num',
                'name': 'Maximum value for {} aggregated across compounds in role "{}"'.format(descriptor.name, compoundRole.label),
                'calculatorSoftware': 'DRP',
                'calculatorSoftwareVersion': 'DRP',
                'maximum': None,
                'minimum': None
                }
            _descriptorDict['{}_{}_{}'.format(compoundRole.label, descriptor.csvHeader, 'Range')] = {
                'type': 'num',
                'name': 'Range for {} aggregated across compounds in role "{}"'.format(descriptor.name, compoundRole.label),
                'calculatorSoftware': 'DRP',
                'calculatorSoftwareVersion': 'DRP',
                'maximum': None,
                'minimum': None
                }
            for w in weightings:
                _descriptorDict['{}_{}_{}_{}'.format(compoundRole.label, descriptor.csvHeader, 'gmean', w)] = {
                    'type': 'num',
                    'name': 'Geometric Mean for {} aggregated across compounds in role "{}" normalised by {}'.format(descriptor.name, compoundRole.label, w),
                    'calculatorSoftware': 'DRP',
                    'calculatorSoftwareVersion': 'DRP',
                    'maximum': None,
                    'minimum': None
                    }
    descriptorDict = setup(_descriptorDict)
    return descriptorDict
Esempio n. 50
0
def setup_app():
    utils.setup(app)