示例#1
0
def parse_stonks(content: str):
    """Parse *content* into a stonks/badstonks meme description.

    Returns (stonks_dict, '') as soon as a stonks line is found,
    or (None, None) when the content is not a stonks meme.
    """
    # stonks and stinks (aka badstonks)
    lines = content.splitlines()
    args = parse_arguments(content)

    stonks = {
        'cap_sep': [],          # captions/separators collected before the body
        'head': '',             # emoji name used as the stonks head
        'flip': args['flip'],   # horizontal flip
        'bad': False,           # True when the :badstonks: variant matched
        'custom_text': ''
    }
    for line in lines:
        # remove zero-width spaces and leading/trailing whitespace
        naked_line = line.replace('\u200b', '').strip()
        # caption and sep (can only be added *before* meme body)
        cap = parse_caption(naked_line)
        if cap is not None:
            stonks['cap_sep'].append(('cap', cap))
        elif is_sep(naked_line):
            stonks['cap_sep'].append(('sep', ''))

        for idx, emj in enumerate([':stonks: ', ':badstonks: ']):
            if not naked_line.startswith(emj):
                continue
            # strip the leading emoji once and reuse the result
            # (the original recomputed this replace() twice)
            rest = naked_line.replace(emj, '', 1)
            if not rest.strip():
                continue
            words = rest.split()
            if words and is_in_emoji_form(words[0]):
                stonks['head'] = words[0].strip(':')
                stonks['bad'] = idx == 1  # idiomatic: was `True if idx == 1 else False`
                if len(words) > 1:
                    # custom text present
                    stonks['custom_text'] = ' '.join(words[1:])
                return (stonks, '')
    return (None, None)
示例#2
0
def test_parser_is_module_allowed():
    """Only modules named on the command line are allowed."""
    cmt.ARGS = args.parse_arguments(['url', 'cpu'])

    for module, allowed in (('url', True), ('cpu', True), ('swap', False)):
        assert args.is_module_allowed_in_args(module) is allowed
示例#3
0
def main():
    """Entry point: load IMDB data, build or restore a classifier, then
    train, evaluate, or predict depending on the parsed arguments."""
    # get hyper parameters
    args = parse_arguments()

    # load data
    print("\nLoading data...")
    TEXT = data.Field(lower=True, batch_first=True)
    LABEL = data.Field(sequential=False)
    train_iter, val_iter = imdb(TEXT, LABEL, args.batch_size)

    # update args with values derived from the loaded vocabularies
    args.n_vocab = n_vocab = len(TEXT.vocab)
    args.n_classes = n_classes = len(LABEL.vocab) - 1
    args.cuda = torch.cuda.is_available()
    args.kernel_sizes = [int(k) for k in args.kernel_sizes.split(',')]
    # timestamped sub-directory so repeated runs never overwrite each other
    args.save_dir = os.path.join(
        args.save_dir,
        datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))

    # print args
    print("\nParameters:")
    for attr, value in sorted(args.__dict__.items()):
        print("\t{}={}".format(attr.upper(), value))

    # initialize/load the model
    if args.snapshot is None:
        classifier = classifiers[args.model]
        classifier = classifier(args, n_vocab, args.embed_dim, n_classes,
                                args.dropout)
    else:
        print('\nLoading model from [%s]...' % args.snapshot)
        try:
            classifier = torch.load(args.snapshot)
        except Exception:
            # was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt; narrowed to Exception
            print("Sorry, This snapshot doesn't exist.")
            exit()
    if args.cuda:
        classifier = classifier.cuda()

    # train, test, or predict
    if args.predict is not None:
        label = predict(classifier, args.predict, TEXT, LABEL)
        print('\n[Text]  {}[Label] {}\n'.format(args.predict, label))
    elif args.test:
        try:
            # NOTE(review): `test_iter` is not defined in this function —
            # imdb() only returns train_iter/val_iter; confirm where the
            # test iterator is supposed to come from
            evaluate(classifier, test_iter, args)
        except Exception:
            print("\nSorry. The test dataset doesn't  exist.\n")
    else:
        print()
        train(classifier, train_iter, val_iter, args)
示例#4
0
def run():
    """Build an environment and agent from CLI args, then run
    args.episode_count episodes, rendering every args.render-th one."""
    args = parse_arguments()

    print(args)

    env = gym.make(args.environment)
    env.seed(args.seed or 0)

    reward = 0
    done = False

    agent = None
    if args.model == 'obs-policy':
        agent = ObsPolicyModel(args.convs or 1, 16,
                               env.observation_space.shape,
                               env.action_space.n,
                               args.layers or 1,
                               args.neurons or 32)
        print('observ_space:', env.observation_space.shape)
        print('action_space:', env.action_space.n)
        agent.build()

    if agent is None:  # was `agent == None`; identity check is idiomatic
        # BUG FIX: `raise('...')` raised a TypeError (a str is not an
        # exception); raise a proper exception type instead
        raise ValueError('You have chosen an unimplented model/agent.')

    for i in range(args.episode_count):
        total_reward = 0
        print(f'Episode #{str(i)} reward: {total_reward}')
        obs = env.reset()
        agent.reset()
        # render every args.render-th episode (only when --render is given)
        render_episode = i % args.render == 0 if args.render else False
        while True:
            if render_episode:
                env.render()
            action = agent.act(obs, reward, done)
            obs, reward, done, _ = env.step(action)
            total_reward += reward
            if done:
                # let the agent see the terminal observation before resetting
                agent.act(obs, reward, done)
                break

    # Close the env and write monitor result info to disk
    env.close()
示例#5
0
    # raise Exception("Timed out!")
    print("Timed out ! (max_execution_time)")
    sys.exit()


# ------------
# Main entry
# ------------

if __name__ == "__main__":

    if not sys.version_info >= (3, 6):
        print("Should use Python >= 3.6")
        sys.exit()

    cmt.ARGS = args.parse_arguments(sys.argv[1:])

    if cmt.ARGS["version"]:
        print(cmt.VERSION)
        sys.exit()

    err = args.get_invalid_modules_in_args()
    if len(err) > 0:
        print("ERR - Unknow module(s)  : " + ','.join(err))
        sys.exit()

    # conf.yml,  conf.d/*.yml
    cmt.CONF = conf.load_conf()

    # if cron mode, introduce a small pause (offset) to spread the load on metrology servers
    if cmt.ARGS['cron']:
示例#6
0
def test_parser_is_module_list_valid_in_args():
    """A command line naming only known modules yields a valid list."""
    cmt.ARGS = args.parse_arguments(['url', 'cpu'])
    result = args.is_module_list_valid_in_args()
    assert result is True
示例#7
0
def test_parser_help():
    """-h triggers argparse's help path, which exits the process."""
    with pytest.raises(SystemExit):
        # return value was bound to an unused local `r`; dropped
        args.parse_arguments(['-h'])
示例#8
0
def test_parser_cron():
    """--cron turns the cron flag on."""
    parsed = args.parse_arguments(['--cron'])
    assert parsed['cron'] is True
示例#9
0
def test_parser_empty():
    """With no arguments, every boolean flag defaults to False."""
    parsed = args.parse_arguments([])
    for flag in ('cron', 'short', 'persist'):
        assert parsed[flag] is False
示例#10
0
# Level 2: An exponential-time naive recursive implementation which counts
#          sequences without generating them explicitly.

from neighbors import neighbors

def count_sequences(start_position, num_hops):
    """Count the distinct sequences of num_hops moves from start_position.

    Deliberately naive and exponential-time (Level 2): it recurses over
    every neighbor without memoization, counting leaves of the call tree.
    """
    if num_hops == 0:
        return 1
    return sum(count_sequences(nxt, num_hops - 1)
               for nxt in neighbors(start_position))


if __name__ == '__main__':
    # CLI entry point: read the start position and hop count from the
    # command line, then print the number of possible sequences
    import args
    a = args.parse_arguments()
    print(count_sequences(a.start_position, a.num_hops))
            "decoder_start_token_id": target_tokenizer.cls_token_id,
            "num_beams": args.num_beams,
            "num_return_sequences": args.num_outputs,
            "no_repeat_ngram_size": 0
        }
        if args.type_forcing:
            generate_args["type_forcing_vocab"] = type_forcing_vocab

        outputs = bert2arsenal.generate(**generate_args)

        # apparently batch instances and return sequences per instance are stacked along a single dimension
        for j in range(batch_size):
            input = [t for t in batch["input_ids"][j] if t != 0]
            true_seq = [t for t in batch['labels'][j] if t != -100]
            outfile.write(f"{input}\t{true_seq}")
            for k in range(j * args.num_outputs, (j + 1) * args.num_outputs):
                pred_seq = [t for t in outputs[k].tolist() if t != 0]
                outfile.write(f"\t{pred_seq}")
            outfile.write("\n")
        outfile.flush()
    outfile.close()


#

if __name__ == "__main__":

    args = parse_arguments(sys.argv[1:])
    # BUG FIX: headers was a set literal ({"parameter", "value"}), whose
    # iteration order is arbitrary — the columns could print in either
    # order; a list makes the header order deterministic
    print(tabulate(vars(args).items(), headers=["parameter", "value"]))
    generate_predictions(args)
示例#12
0
		inform("Exception during stopping monitor %s " % e)
	finally:
		inform("BYEEE!")

atexit.register(cleanup)



if __name__ == "__main__":
	# NOTE: the original `global monitor/DIR/flush_frequency` statements
	# were no-ops — `global` has no effect at module scope — so they
	# were removed; these assignments already create module globals.
	monitor = None
	signals_to_handle = [signal.SIGINT, signal.SIGHUP, signal.SIGQUIT, signal.SIGABRT, signal.SIGALRM]
	args = parse_arguments()
	DIR = ""
	flush_frequency = args.flush
	monitor = FileTaskMonitor(workers=args.max_proc)

	def signal_handler(signum, frame):
		# parameter renamed from `signal` — it shadowed the signal module
		inform("Handling signal %s %s" % (signum, frame))
		sys.exit(0)

	DIR = args.input_dir

	# install the same handler for every signal we care about
	for os_signal in signals_to_handle:
		signal.signal(os_signal, signal_handler)
示例#13
0
#!/usr/bin/python3
from args import parse_arguments
import controller

if __name__ == '__main__':
  args = parse_arguments()
  # forward each tuning knob from the CLI straight into the controller
  options = {
    'exchange_after': args.exchange,
    'generations': args.generations,
    'cities': args.cities,
    'population_size': args.chromosomes,
    'elite_size': args.elite,
    'mutation_probability': args.mprobability,
    'independent_populations': args.populations,
    'number_workers': args.workers,
    'verbose': args.verbose,
    'latex': args.latex,
  }
  controller.run(**options)

示例#14
0
from args import parse_arguments
import multiprocessing
import os
import webbrowser
import time

params = parse_arguments()

# -1 means "use every available CPU thread"
if params.num_workers == -1:
    num_workers = multiprocessing.cpu_count()
else:
    num_workers = params.num_workers

# Build e.g. "tensorboard --logdir=worker_0:train_0/,worker_1:train_1/,".
# The quadratic `+=` loop was replaced by a single join; the trailing
# comma of the original string is preserved byte-for-byte.
launcher = 'tensorboard --logdir=' + ''.join(
    'worker_' + str(worker) + ':train_' + str(worker) + '/,'
    for worker in range(num_workers))

if __name__ == '__main__':
    os.system(launcher)
    time.sleep(3)  # give tensorboard time to start before opening the browser
    webbrowser.open('http://localhost:6006', new=2)
示例#15
0
def prepare(toot: str, emojis=None, instance='', saveto='') -> tuple:
    """Classify *toot* as a meme and parse it.

    Returns a (meme_type, info_dict, description) tuple. When the toot is
    not a supported meme, info_dict and description are None and the first
    element is a sentinel string such as 'not a meme' or 'empty'.
    """
    # BUG FIX: the default was a mutable `emojis={}`, which is shared
    # across all calls; normalize None to a fresh dict instead
    if emojis is None:
        emojis = {}

    blessed = uncurse(toot)
    if not blessed.strip():
        return ('empty', None, None)

    arguments = parse_arguments(blessed)

    # overall priority:
    # font > mono > lang
    # arguments here are filled with default values in args.py
    # mono overrides lang (if mono is present, the elif will not be entered)
    if arguments['mono']:
        font = MONO
    elif arguments['lang'] in LANGS:
        font = LANGS[arguments['lang']]
    else:
        return ('language not supported', None, None)

    stroke = False
    if arguments['font'] in FONTS:
        # font overrides mono and lang
        font = FONTS[arguments['font']]
        if arguments['font'] == 'impact':
            stroke = True  # render text in its stroke (outline)
    elif arguments['font'] == 'unchanged':
        # default value in args.py
        pass
    else:
        return ('font not supported', None, None)

    def _payload(key, value):
        # every meme type shares the same payload apart from its first key;
        # this helper replaces four near-identical dict literals
        return {
            key: value,
            'emojis': emojis,
            'font': font,
            'stroke': stroke,
            'instance': instance,
            'saveto': saveto
        }

    drakes, desc = parse_drake(blessed)
    if drakes:
        # is drake meme (or at least a portion thereof)
        return ('Drake', _payload('drakes', drakes), desc)

    # uh oh, is not drake
    brains, desc = parse_brainsize(blessed)
    if brains:
        # is brain size meme
        return ('Brain Size', _payload('brains', brains), desc)

    stonks, desc = parse_stonks(blessed)
    if stonks:
        # is stonks meme
        return ('Stonks', _payload('stonks', stonks), desc)

    woman_yelling, desc = parse_woman_yelling(blessed)
    if woman_yelling:
        # is woman yelling meme
        return ('Woman Yelling', _payload('entities', woman_yelling), desc)

    # NOTE TO SELF: do not forget to add proxy to def memethesis()
    # when adding a new meme type
    return ('not a meme', None, None)