def main(argv):
    """Parse command-line options and run the TypeScript interface generator.

    Requires --inspector-scripts-path, --combined-domains-path and
    --output-dir; prints usage and exits with status 2 when any is missing.
    """
    usage = 'Usage: generate_typescript_interfaces.py --inspector-scripts-path <path to JavaScriptCore/inspector/scripts> --combined-domains-path <path to CombinedDomains.json> --output-dir <result directory path>'

    inspector_scripts_path = ''
    combined_domains_path = ''
    output_dir = ''
    try:
        opts, args = getopt.getopt(argv, "h", ["help", "inspector-scripts-path=", "combined-domains-path=", "output-dir="])
    except getopt.GetoptError:
        print(usage)
        sys.exit(2)
    for opt, arg in opts:
        if opt in ("-h", "--help"):
            print(usage)
            sys.exit()
        elif opt == "--inspector-scripts-path":
            inspector_scripts_path = arg
        elif opt == "--combined-domains-path":
            combined_domains_path = arg
        elif opt == "--output-dir":
            output_dir = arg

    # Validate AFTER parsing so that "-h" works on its own; the original
    # up-front "len(argv) != 6" check made the --help branch unreachable.
    if not (inspector_scripts_path and combined_domains_path and output_dir):
        print(usage)
        sys.exit(2)

    # The generator module lives next to the inspector scripts, so it can
    # only be imported once that path is known.
    sys.path.append(inspector_scripts_path)

    from generate import generate
    generate(combined_domains_path, output_dir)
Ejemplo n.º 2
0
def process(clean_word):
    """Advance *clean_word* one processing stage, then re-queue it if unfinished."""
    entry, is_new = word.get_word(clean_word)
    if entry.completed():
        logging.info('Processing COMPLETED for %s' % clean_word)
        return

    stage = status.get_latest_state(clean_word)
    logging.info('PROCESSING %s [%i]' % (entry.word(), stage))

    # Route the word to whichever stage owns its current state band.
    if stage < 10:
        analyse.analyse(entry, stage)
    elif stage < 20:
        generate.generate(entry, stage)
    else:
        # Completion marker (local only — not persisted directly).
        stage = 808

    # Persist the latest word payload if needed.
    entry.persist_payload()

    # Schedule the next hop unless the pipeline just finished.
    if not entry.completed():
        deferred.defer(process, clean_word)
Ejemplo n.º 3
0
def cleanup(fn):
    """Remove mp3 leftovers that rename() may have produced, then regenerate them."""
    stale_files = glob("./*.mp3")
    for path in stale_files:
        os.remove(path)
    generate(fn)
Ejemplo n.º 4
0
def submit():
    """Handle a POSTed painting payload and commit it via generate().

    Reads startDate/painted (required) and the author identity (optional)
    from the JSON body; returns "OK" on success.
    """
    if request.method == 'POST':
        date = request.json["startDate"]
        painted = request.json["painted"]
        # Author identity is optional; generate() falls back to its defaults.
        user_full_name = request.json.get("user_full_name", None)
        user_email = request.json.get("user_email", None)
        print(date)  # was a Python 2 print statement
        print(pretty_canvas(painted))
        generate(test_repo(), date, painted,
                 author=user_full_name, email=user_email)
        return "OK"
Ejemplo n.º 5
0
def check_hashes():
    """Recompute the md5 of every source .md file; regenerate when any changed."""
    modified = False
    for f in glob.glob(os.path.join(config.get("sourcedir"), "*.md")):
        md5 = hashlib.md5()
        with open(f, 'rb') as fh:  # the original leaked this file handle
            md5.update(fh.read())
        h = md5.hexdigest()
        # .get() tolerates files with no recorded hash yet; the original
        # raised KeyError the first time a new file appeared.
        if file_hashes.get(f) != h:
            file_hashes[f] = h
            modified = True
    if modified:
        print("regenerating")  # was a Python 2 print statement
        generate.generate()
Ejemplo n.º 6
0
def upload_article():
    """Upload a markdown article.

    1. Save the file locally.
    2. Convert markdown to html.
    3. Reload the index information.
    """
    upload = request.files["md_file"]
    safe_name = secure_filename(upload.filename)
    upload.save(os.path.join(INPUT_CONTENT, safe_name))
    generate()
    IndexData.reload_index_data()
    return redirect(url_for("page_articles"))
Ejemplo n.º 7
0
def generate_index():
    """Rebuild the article index and report the outcome as JSON."""
    try:
        generate()
        IndexData.reload_index_data()
        return jsonify({
            "msg": "ok"
        })
    except Exception as exc:
        # Log the full traceback but keep the endpoint from erroring out.
        current_app.logger.exception(exc)
        return jsonify({
            "msg": "failed"
        })
Ejemplo n.º 8
0
 def testAll(self):
     """Round-trip every buildout: render(generate(x)) must reproduce x."""
     for buildout in self.buildouts:
         buildout_string = to_string(buildout)
         generated_buildout = to_string(render(generate(buildout)))
         if buildout_string != generated_buildout:
             # Dump both sides for diagnosis before failing.
             print("Test Case Failure!")  # was Python 2 print statements
             print("Input")
             print(etree.tostring(generate(buildout), pretty_print=True))
             print(buildout_string)
             print("")
             print("!=")
             print("Generated")
             print()
             print(generated_buildout)
             self.fail()
Ejemplo n.º 9
0
def main():
    """Drive one batch run: write parameters, run the 1D Poisson solver,
    then archive its Qwell output files."""
    global batch_run
    for i in range(1):
        # run_parameter() writes into the global batch_run file handle.
        batch_run = open("batch_run.txt", "w")
        width = 100
        # Zero-pad the width to three digits for file naming.
        numstr = str(width).zfill(3)
        run_parameter(str(numstr))
        batch_run.close()
        generate.width2 = width
        generate.generate(numstr)
        os.system("cat batch_run.txt | ./1D\\ Poisson")
        print(numstr)  # was a Python 2 print statement
        os.system("mv Qwell" + str(numstr) + "* energy_data7")
Ejemplo n.º 10
0
 def compile(self, rdql):
     """Compile an RDQL query string.

     Parses with the PLY lexer/parser, resolves names, then generates the
     final form.  Returns a (select, count) pair; each stage's AST is kept
     on self.ast, with the generated AST left there afterwards.
     """
     from lex import Lexer
     from yacc import Parser
     self.ast = Parser().parse(rdql, lexer=Lexer())
     self.ast = resolve(self.ast) 
     select, count, self.ast = generate(self.ast)
     return select, count
Ejemplo n.º 11
0
def create_generation(num):
    """Breed generation *num* from the fitness-ranked survivors of num-1.

    Fills the new generation directory with bred children plus a random
    number of freshly generated programs.  Raises IndexError for num <= 0.
    """
    if num <= 0:
        raise IndexError(str(num) + " is too small. Must be > 0.")
    pool = proc.Pool(mp.max_num_workers)

    # We now have fitness output for every program of the previous
    # generation; rank them and pick breeding partners.
    ordered_candidates = filtered_filenames(str(num - 1))
    partners = choose_partners(ordered_candidates)

    # Breed the pairs in parallel.
    result = pool.map_async(breed_fittest, partners)
    result.wait()
    children = result.get()

    # Inject some brand-new random programs to keep diversity up.
    num_randgen = random.randint(mp.min_new_functions(), mp.max_new_functions())
    for _ in range(num_randgen):
        children.append(generate.generate(gp.num_rnd_instrs))

    os.mkdir(gen_dir(num))

    # Write each child program to its own member file; 'with' guarantees
    # the handle is closed even if a write fails (the original leaked it).
    for i, child in enumerate(children):
        tokens = [str(word) for word in child]
        tokens.append("\n")
        with open(gen_member(num, i), 'w') as fh:
            fh.write(''.join(tokens))
Ejemplo n.º 12
0
def main():
    """Emit the nullable / type-cache / cast generated sections."""
    sections = [
        ("Nullable Instance", generate_nullable_instance),
        ("Type Cache", generate_type_cache),
        ("Type Casts", generate_type_casts),
        ("Enum Casts", generate_enum_casts),
    ]
    return generate(*sections)
def main():
    """Emit the LightLambda / dynamic instruction generated sections."""
    sections = [
        ("LightLambda Run Methods", gen_run_methods),
        ("Dynamic Instructions", gen_instructions),
        ("Dynamic Instruction Types", gen_types),
        ("Untyped Dynamic Instructions", gen_untyped),
    ]
    return generate(*sections)
    def setUpClass(cls):
        """Deploy a freshly generated, obfuscated agent before the tests run.

        NOTE(review): assumes `config`, `generate`, `save_generated`,
        `stream_handler` and the class attributes `password`/`path` are
        provided by the surrounding test harness — not visible here.
        """

        if config.debug:
            stream_handler.setLevel(logging.DEBUG)
        else:
            stream_handler.setLevel(logging.INFO)

        # Presumably randomises the backdoor parameters for this run — see
        # _randomize_bd for the details.
        cls._randomize_bd()

        # Check `config.script_folder` permissions
        if (
            subprocess.check_output(
                config.cmd_env_stat_permissions_s % (config.script_folder),
                shell=True).strip()
            != config.script_folder_expected_perms
            ):
            raise DevException(
                "Error: give to the http user full permissions to the folder \'%s\'"
                % config.script_folder
            )

        obfuscated = generate(cls.password)

        # Stage the generated agent in a temp file, move it into place, then
        # loosen its permissions so the web server can execute it.
        tmp_handler, tmp_path = tempfile.mkstemp()
        save_generated(obfuscated, tmp_path)
        subprocess.check_call(
            config.cmd_env_move_s_s % (tmp_path, cls.path),
            shell=True)

        subprocess.check_call(
            config.cmd_env_chmod_s_s % ('777', cls.path),
            shell=True)
Ejemplo n.º 15
0
    def test_build(self):
        """ Try to invoke the build target of each demo """

        # TODO: please use bouwer modules directly instead? e.g. bouwer.execute()

        for lang in os.listdir(self.demodir):
            lang_dir = self.demodir + os.sep + lang
            for demo in os.listdir(lang_dir):
                os.chdir(lang_dir + os.sep + demo)

                # A demo may ship a generate.py that produces its sources.
                if os.path.exists('generate.py'):
                    import generate
                    generate.generate()

                # Build, then clean; a non-zero exit from either fails the test.
                status = os.system(self.bouwer + ' -qf')
                self.assertEqual(status, 0, 'building ' + os.getcwd() + ' failed')
                status = os.system(self.bouwer + ' -c')
                self.assertEqual(status, 0, 'cleaning ' + os.getcwd() + ' failed')
Ejemplo n.º 16
0
    def test_generators(self):
        """Generate, deploy and connect to 500 randomised agents in a row."""
        for _ in range(500):
            self._randomize_bd()
            obfuscated = generate(self.password)
            save_generated(obfuscated, self.path)
            self.channel = Channel(self.url, self.password, 'StegaRef')
            self._clean_bd()
Ejemplo n.º 17
0
def initialize():
    """Set up the repo: add .gitignore and generate config.oscm.

    Returns 1 on success, 0 on failure (legacy int convention kept for callers).
    """
    add_gitignore()
    status = generate.generate()
    if status["code"] == 0:
        # NOTE(review): the original mixed `status["code"]` (key access) with
        # `status.error_desc` (attribute access), which crashes on a plain
        # dict; assuming dict semantics here — confirm against generate().
        print("ERROR: could not generate config.oscm, " + str(status.get("error_desc")))
        return 0
    return 1
Ejemplo n.º 18
0
def meta_generated(alg):
    """Exercise *alg* against generated graphs for parameters 1..9.

    For each i, repeatedly builds a graph via generate(i) — presumably sized
    by i, confirm against generate() — and asserts that alg returns a valid
    feedback vertex set of exactly i vertices.
    """
    for i in range(1, 10):
        for _ in range(1, 10):
            g = generate(i)
            fvs = alg(g, i)
            # `is not None` replaces the `!= None` equality test, which can
            # be fooled by custom __eq__ implementations.
            assert fvs is not None
            assert is_fvs(g, fvs)
            assert len(fvs) == i
Ejemplo n.º 19
0
def main():
    """Emit set-operation sections for both dictionary view types."""
    generators = []
    for label, view in (('Keys', 'DictionaryKeyView'),
                        ('Items', 'DictionaryItemView')):
        generators.append(('Set Operations (%s)' % label, gen_ops(view)))
        generators.append(('Set Comparison Operations (%s)' % label,
                           gen_comps(view)))
    return generate(*generators)
Ejemplo n.º 20
0
def affixate(text):
    """Tokenise *text* and return generated word forms.

    NOTE(review): each loop iteration overwrites the previous result, so only
    the LAST token's forms are returned — confirm this is intended.
    """
    tokens = token_text(text)
    import generate
    if not tokens:
        return u''
    for token in tokens:
        generated = generate.generate(token)
    return generated
Ejemplo n.º 21
0
def main():
    """Emit the expression-tree related generated sections."""
    sections = (
        ("Expression Debugger Proxies", gen_debug_proxies),
        ("Expression Tree Node Types", gen_tree_nodes),
        ("Checked Operations", gen_checked_ops),
        ("Binary Operation Binder Validator", gen_binop_validator),
        ("Unary Operation Binder Validator", gen_unop_validator),
        ("StackSpiller Switch", gen_stackspiller_switch),
        ("Expression Compiler", gen_compiler),
    )
    return generate(*sections)
Ejemplo n.º 22
0
def main():
    """Assemble `assembly_file`: tokenize each line, then emit code."""
    if not os.path.exists(assembly_file):
        raise Exception('No file found at {0}'.format(assembly_file))

    tokens = []

    with open(assembly_file) as src:
        # enumerate replaces the manual line_num counter (1-based).
        for line_num, raw in enumerate(src, start=1):
            try:
                sanitized = sanitize_line(raw)
                if sanitized:
                    tokens.append(parse.parse_line(sanitized, line_num))
            except parse.ParseException as e:
                raise parse.ParseException('Error at line number {0}: {1}, {2}'.format(line_num, raw.strip(), str(e)))

    generate.generate(tokens)
Ejemplo n.º 23
0
def affixate(text):
    """Generate all affixed forms for the words of *text*.

    NOTE(review): the loop overwrites its result each iteration, so only the
    last token's forms are returned — confirm this is intended.
    """
    tokens = token_text(text)
    import generate
    if not tokens:
        return u''
    for token in tokens:
        generated_forms = generate.generate(token)
    return generated_forms
Ejemplo n.º 24
0
def main():
    """Build the exception-related section list and hand it to generate()."""
    fixed_sections = [
        ("ToPython Exception Helper", gen_topython_helper),
        ("Exception Factories", factory_gen),
        ("Python New-Style Exceptions", newstyle_gen),
        ("builtin exceptions", builtin_gen),
    ]
    # One maker section per known Python exception type.
    per_exception = [(get_clr_name(e), gen_one_exception_maker(e))
                     for e in pythonExcs]
    return generate(*(fixed_sections + per_exception))
Ejemplo n.º 25
0
def main():
    """Emit the delegate / action / func related generated sections."""
    sections = [
        ("Func Types", gen_func_types),
        ("Action Types", gen_action_types),
        ("UpdateAndExecute Methods", gen_update_targets),
        ("Delegate Action Types", gen_delegate_action),
        ("Delegate Func Types", gen_delegate_func),
        ("Maximum Delegate Arity", gen_max_delegate_arity),
        # outer ring generators
        ("Delegate Microsoft Scripting Action Types", gen_delegate_action),
        ("Delegate Microsoft Scripting Scripting Func Types", gen_delegate_func),
    ]
    return generate(*sections)
Ejemplo n.º 26
0
def main():
    """Emit expression-tree sections; debugger proxies only pre-.NET 4."""
    import System
    sections = [
        ("Expression Tree Node Types", gen_tree_nodes),
        ("Checked Operations", gen_checked_ops),
        ("Binary Operation Binder Validator", gen_binop_validator),
        ("Unary Operation Binder Validator", gen_unop_validator),
        ("StackSpiller Switch", gen_stackspiller_switch),
        ("Expression Compiler", gen_compiler),
    ]
    if System.Environment.Version.Major < 4:
        sections.append(("Expression Debugger Proxies", gen_debug_proxies))
    return generate(*sections)
Ejemplo n.º 27
0
    def setUpClass(cls):
        """Generate an obfuscated agent and deploy it to `cls.path`.

        NOTE(review): relies on `generate`, `save_generated`, `config` and
        the class attributes `password`/`path` supplied by the harness.
        """

        # Presumably randomises the backdoor parameters — see _randomize_bd.
        cls._randomize_bd()

        obfuscated = generate(cls.password)

        # Stage the agent in a temp file, move it into place, then loosen
        # its permissions so the web server can execute it.
        tmp_handler, tmp_path = tempfile.mkstemp()
        save_generated(obfuscated, tmp_path)
        subprocess.check_call(
            config.cmd_env_move_s_s % (tmp_path, cls.path),
            shell=True)

        subprocess.check_call(
            config.cmd_env_chmod_s_s % ('777', cls.path),
            shell=True)
Ejemplo n.º 28
0
def main():
    """Generate quiz questions from the paragraphs of a text file.

    Usage: <script> <text-file> <num-questions>.  Samples paragraphs at
    random, generates candidate questions, and randomly promotes about a
    third of them until enough final questions are collected.
    """
    # add other generation functions to this list
    generate.register_generation({
        "basic": easy1.generate,
        "who": who.generate,
        "whose": who.generate_plural,
        "which": which.generate,
        })

    fname = sys.argv[1]
    nquestions = int(sys.argv[2])

    # Keep only real paragraphs, not titles or tables (short lines);
    # 'with' closes the handle the original left open.
    with open(fname) as f:
        paras = [line for line in f if len(line) > 70]

    all_qs = []
    qs = []

    while (len(qs) < nquestions) and (len(paras) > 0):
        i = random.randint(0, len(paras) - 1)
        p = paras.pop(i)
        parsed = parse.parse(p)
        # Keep only well-formed (question, ...) tuples longer than 5 words.
        all_qs += filter(lambda x: (type(x) == tuple) and
                                   (x[0] != None) and
                                   (lib.wc(x[0]) > 5),
                         generate.generate(parsed))
        # Randomly promote roughly a third of the candidates to final
        # questions; the rest stay in the pool for the next round.
        flips = [random.randint(0, 2) for _ in range(len(all_qs))]
        new_qs = []
        for j in range(len(flips)):
            if flips[j] == 0:
                qs += [all_qs[j][0]]
            else:
                new_qs += [all_qs[j]]
        all_qs = new_qs

    lib.pretty_print(qs)
    lib.pretty_print(all_qs)
    for q in map(lib.format, qs):
        print(q)  # was a Python 2 print statement
Ejemplo n.º 29
0
def generate_model(p=30, p_zero=0.7, type=None):
    """Create a model used to sample data from its distribution and to
    compare an approximated model against the original distribution.

    Parameters
    ----------
    p : int
        Presumably the number of nodes in the graph — confirm upstream.
    p_zero : float
        Sparsity parameter for the random graph.
    type : str or None
        None (or empty) for a random graph, 'grid' for a grid graph.
        (Parameter name kept for backward compatibility even though it
        shadows the builtin.)
    """
    if type == 'grid':
        adjmat = grid_graph(p)
    elif not type:
        adjmat = random_graph(p, p_zero)
    else:
        # The original fell through with `adjmat` unbound (NameError) for
        # any other value; fail with an explicit error instead.
        raise ValueError('unsupported model type: %r' % (type,))
    return generate(adj_mat=adjmat)
Ejemplo n.º 30
0
def main():
    """Emit the Python operator / tokenizer generated sections."""
    sections = [
        ("Python Constant Folding", gen_constant_folding),
        ("Python Fast Ops RetBool Chooser", fast_op_ret_bool_chooser),
        ("Python Fast Ops Ret Bool", fast_op_ret_bool),
        ("Tokenize Ops", tokenize_generator),
        ("Token Kinds", tokenkinds_generator),
        ("Tokens", tokens_generator),
        ("Table of Operators", gen_OperatorTable),
        ("PythonOperator Mapping", gen_operatorMapping),
        # ("OperatorToSymbol", gen_OperatorToSymbol),
        ("StringOperatorToSymbol", gen_StringOperatorToSymbol),
        ("WeakRef Operators Initialization", weakref_operators),
        ("OldInstance Operators", oldinstance_operators),
        # ("Operator Reversal", operator_reversal),
        ("WeakRef Callable Proxy Operators Initialization", weakrefCallabelProxy_operators),
    ]
    return generate(*sections)
Ejemplo n.º 31
0
def main():
    """Emit the IntOps generated section."""
    sections = [("IntOps", gen_all)]
    return generate(*sections)
Ejemplo n.º 32
0
# File: randomWalk.py
import sys
from generate import generate

# argv[1] is the walk parameter `a`, argv[2] the number of steps `n`.
n = int(sys.argv[2])
a = int(sys.argv[1])
output = generate(a, n)
print(output)  # was a Python 2 print statement
Ejemplo n.º 33
0
def main():
    """Entry point: dispatch SpecGAN training / generation / utility modes."""

    args = get_config()

    #---display model type---#
    print('-' * 80)
    # An if/else replaces the original conditional expression, which used
    # a ternary purely for its print() side effect.
    if args.conditional:
        print('# Training Conditional SpecGAN!')
    else:
        print('# Training SpecGAN!')
    print('-' * 80)

    #---make train dir---#
    if args.conditional: args.train_dir = args.train_dir + '_cond'
    if not os.path.isdir(args.train_dir):
        os.makedirs(args.train_dir)

    #---save args---#
    with open(os.path.join(args.train_dir, 'args.txt'), 'w') as f:
        f.write('\n'.join([
            str(k) + ',' + str(v)
            for k, v in sorted(vars(args).items(), key=lambda x: x[0])
        ]))

    #---make model kwarg dicts---#
    setattr(
        args, 'SpecGAN_g_kwargs', {
            'kernel_len': args.SpecGAN_kernel_len,
            'dim': args.SpecGAN_dim,
            'use_batchnorm': args.SpecGAN_batchnorm,
            'upsample': args.SpecGAN_genr_upsample,
            'initializer': args.SpecGAN_model_initializer,
        })
    setattr(
        args, 'SpecGAN_d_kwargs', {
            'kernel_len': args.SpecGAN_kernel_len,
            'dim': args.SpecGAN_dim,
            'use_batchnorm': args.SpecGAN_batchnorm,
            'initializer': args.SpecGAN_model_initializer,
        })

    #---collect path to data---#
    # NOTE(review): `fps` is only bound in these two modes, yet the moments
    # retry below calls moments(fps, args) — other modes would NameError
    # if their moments file fails to load; confirm intended usage.
    if args.mode == 'train' or args.mode == 'moments':
        fps = glob.glob(
            os.path.join(args.data_dir, args.data_tfrecord_prefix) +
            '*.tfrecord')

    #---load moments---#
    if args.mode != 'moments' and args.data_moments_file is not None:
        # Retry until the moments file loads; a failed load triggers a
        # recompute via moments().
        while True:
            try:
                print('# Moments: Loading existing moments file...')
                with open(os.path.join(args.train_dir, args.data_moments_file),
                          'rb') as f:
                    _mean, _std = pickle.load(f)
                    break
            except Exception:  # was a bare `except:` (also caught SystemExit)
                print(
                    '# Moments: Failed to load, computing new moments file...')
                moments(fps, args)
        setattr(args, 'data_moments_mean', _mean)
        setattr(args, 'data_moments_std', _std)

    #---run selected mode---#
    if args.mode == 'train':
        infer(args, cond=args.conditional)
        train(fps, args, cond=args.conditional)
    elif args.mode == 'generate':
        infer(args, cond=args.conditional)
        generate(args, cond=args.conditional)
    elif args.mode == 'moments':
        moments(fps, args)
    elif args.mode == 'preview':
        preview(args)
    elif args.mode == 'incept':
        incept(args)
    elif args.mode == 'infer':
        infer(args)
    else:
        raise NotImplementedError()
Ejemplo n.º 34
0
def main():
    """Emit the Int and Float op generated sections."""
    sections = [("IntOps", gen_int), ("FloatOps", gen_float)]
    return generate(*sections)
Ejemplo n.º 35
0
def main():
    """Emit the tuple-related generated sections."""
    sections = [
        ("Tuples", gen_tuples),
        ("Tuple Get From Size", gen_get_size),
    ]
    return generate(*sections)
Ejemplo n.º 36
0
else:
    device = 'cpu'
print('   Found ' + device + '.')

print('>> Loading perturbation...')
# generate perturbation v of 224*224*3 of [-10,10] directly on original image.
file_perturbation = 'data/universal.npy'
# os.path.isfile returns a bool; `== 0` means "file does not exist".
if os.path.isfile(file_perturbation) == 0:
    print('   No perturbation found, computing...')

    print('>> Checking dataset...')
    if not os.path.exists(args.PATH):
        print("Data set not found. please check!")
        sys.exit()
    print('   Done.')
    # Compute the universal perturbation over the train/validation lists.
    # NOTE(review): the hyper-parameters (delta, overshoot, max_iter_df, ...)
    # are passed straight through to generate(); their semantics live there.
    v = generate(args.PATH, 'dataset4u-trn.txt', 'dataset4u-val.txt', net, max_iter_uni=10, delta=0.1, p=np.inf, num_classes=25, overshoot=0.1, max_iter_df=500, xi=args.xi, batch_size=args.batch_size)
    # Saving the universal perturbation
    np.save('./data/universal.npy', v)
else:
    print('   Found a pre-computed universal perturbation at', file_perturbation)
    v = np.load(file_perturbation)


testimg = "./data/test_im4.jpg"
print('>> Testing the universal perturbation on', testimg)
labels = open('./data/labels.txt', 'r').read().split('\n')
testimgToInput = Image.open(testimg).convert('RGB')
# Add the perturbation to the cropped image and clamp to 8-bit pixel range.
pertimgToInput = np.clip(cut(testimgToInput)+v, 0, 255)
pertimg = Image.fromarray(pertimgToInput.astype(np.uint8))

img_orig = transform(testimgToInput)
Ejemplo n.º 37
0
               [p[1] for p in dataset if p[2] < 0], 'ro')
    pylab.contour(xrange,
                  yrange,
                  grid, (-1.0, 0.0, 1.0),
                  colors=('red', 'black', 'blue'),
                  linewidths=(1, 1, 1))
    pylab.show()


if __name__ == '__main__':
    # Earlier experiments (other datasets, linear/radial/sigmoid kernels)
    # were left commented out here; only the polynomial sweep is live.
    data = generate.generate(points=25)
    for degree in range(5, 10):
        plot_classification(data, lambda x, y: poly_kernel(x, y, degree))
Ejemplo n.º 38
0
            if query_result == 1:
                print("Target found at [%s,%s], %s." %
                      (current_i, current_j, map[current_i][current_j]))
                print("Totoal Search Steps:%d" % search_count)
                return search_count, move_count
            else:
                # print("Search [%d,%d]" % (query_i,query_j))
                makeMove(map, beliefmap, current_i, current_j)
                for i in range(dim):
                    for j in range(dim):
                        if beliefmap[i][j] < 0:
                            raise Exception("ERROR")


if __name__ == '__main__':
    # Build a random 50x50 game and pull out the board and target location.
    game = generate(50)
    map = game.get('board')
    target_x = game.get('target').row
    target_y = game.get('target').col
    # Accumulators for two search policies.  NOTE(review): only policy 1 is
    # exercised in the visible code; steps for policy 2 may be used further
    # down in the original file — confirm.
    searchstep1 = 0
    searchstep2 = 0
    movestep1 = 0
    movestep2 = 0

    policy = 1
    # Average policy-1 performance over 10 runs.
    for i in range(10):
        searchsteps, movesteps = probablisticSearch(map, target_x, target_y, 1)
        searchstep1 += searchsteps
        movestep1 += movesteps
        print(searchsteps, movesteps)
Ejemplo n.º 39
0
import argparse
import generate

# CLI: a single positional argument, the clan tag to render.
parser = argparse.ArgumentParser()
parser.add_argument('tag', help='The desired clan tag')

if __name__ == '__main__':
    parsed = parser.parse_args()
    generate.generate(parsed.tag)
Ejemplo n.º 40
0
def create_tweet():
    """Create the text of the tweet you want to send."""
    return generate()
Ejemplo n.º 41
0
def main():
    """Emit the Python AST walker generated sections."""
    sections = [
        ("Python AST Walker", gen_python_walker),
        ("Python AST Walker Nonrecursive", gen_python_walker_nr),
    ]
    return generate(*sections)
Ejemplo n.º 42
0
    parser.add_argument('-d',
                        '--dataset-path',
                        type=str,
                        help='The dataset to use')

    parser.add_argument('-dt',
                        '--dataset-type',
                        type=str,
                        default='cars',
                        help='The dataset to use')

    FLAGS, unparsed = parser.parse_known_args()

    # Check if cuda is available
    FLAGS.cuda = FLAGS.cuda and torch.cuda.is_available()

    # Get the desired pretrained models for the dataset
    '''
    if not FLAGS.dataset is None:
        FLAGS.dpath = '/'.join(FLAGS.dpath.split('/')[:2] + [FLAGS.dataset] + [FLAGS.dpath.split('/')[3]])
        FLAGS.gpath = '/'.join(FLAGS.gpath.split('/')[:2] + [FLAGS.dataset] + [FLAGS.gpath.split('/')[3]])
        '''

    if FLAGS.mode == 'train':
        train(FLAGS)
    elif FLAGS.mode == 'predict':
        generate(FLAGS)
    else:
        raise RuntimeError('Invalid value passed for mode. \
                Valid arguments are: "train" and "test"')
Ejemplo n.º 43
0
def main():
    """Run every registered generator section."""
    sections = GetGeneratorList()
    return generate(*sections)
Ejemplo n.º 44
0
def main():
    """Emit the 'Symbols - Other Symbols' generated section."""
    sections = (("Symbols - Other Symbols", generate_symbols),)
    return generate(*sections)
Ejemplo n.º 45
0
from keras.models import load_model
from keras.datasets import fashion_mnist
import numpy as np
from process.ssim import get_ssim
from generate import generate

# load model
model = load_model("model.hdf5")

# load test data and labels
(train_data, train_labels), (t_data, test_labels) = fashion_mnist.load_data()
# The saved .npy test set replaces the freshly downloaded one above.
test_data = np.load("test_data/test_data.npy")
generate_data = test_data

# generate attack data
# NOTE(review): the shape argument suggests generate() expects images as
# (count, 28, 28, 1) — confirm against generate's signature.
attack_data = generate(generate_data, (len(generate_data), 28, 28, 1))


# judge where attack success
def attack_success(prev, attack):
    """Return True when the adversarial image flips the model's prediction."""
    batch_prev = prev.reshape((1, prev.shape[0], prev.shape[1], prev.shape[2]))
    batch_attack = attack.reshape(
        (1, attack.shape[0], attack.shape[1], attack.shape[2]))
    label_before = np.argmax(model.predict(batch_prev))
    label_after = np.argmax(model.predict(batch_attack))
    return label_before != label_after


# attack success
success_count = 0
success_index = []
Ejemplo n.º 46
0
def main():
    """Emit the reflected-caller generated section."""
    sections = (("Reflected Caller", gen_all),)
    return generate(*sections)
Ejemplo n.º 47
0
    #on chip memory test
    genModules = generate(); #store all names of generated modules
    genModules.onChipMem("d","wren","clk","write_addr","read_addr","q",newFile)
    newFile.writeTopModule(genModules) #connect all generated modules inside a top_module
"""

if __name__ == "__main__":
    """
    ring osc prompt
    """

    #creates new file with name "ro.sv"
    newFile = writeToFile("ro.sv")

    #store all names of generated modules
    genModules = generate()

    #ask user for number of ring oscillators
    while True:
        try:
            c = int(input("Please enter the number of ring oscillators:"))
        except ValueError or c <= 0:
            continue
        else:
            break

    #for each ring oscillator, ask user for specifications
    for i in range(int(c)):
        print("\nRO #{num}:".format(num=str(i + 1)))

        in_port = input(
Ejemplo n.º 48
0
from rule import Rule
from generate import generate
from validate import validate
# 已知:有斑点、长脖子、长腿、有奶、有蹄。 询问:这是什么动物呢?

if __name__ == '__main__':
    # Input format: line 0 holds the goal facts, line 1 the rule count,
    # then the rules ("cond1 cond2#result"), a fact-set count, and the
    # condition sets to run forward/backward chaining on.
    with open('input.txt', 'r') as file:  # 'with' closes the handle (was leaked)
        list_lines = file.readlines()
    target = list_lines[0].split(' ')
    rules = []
    num = int(list_lines[1])
    for i in range(2, 2 + num):
        rule = list_lines[i]
        condition, result = rule.split('#')
        condition = condition.split(' ')
        result = result[:-1]  # strip the trailing newline
        rules.append(Rule(condition, result))
    num2 = int(list_lines[2 + num])
    for i in range(2 + num + 1, 2 + num + 1 + num2):
        conds = list_lines[i].split(' ')
        conds[-1] = conds[-1][:-1]  # strip trailing newline from last token
        print('generate:')
        generate(target, rules, conds)
        print('')
        print('validate:')
        validate(target, rules, conds)
Ejemplo n.º 49
0
def main():
    """Emit the Python AST walker and name-binder generated sections."""
    sections = [
        ("Python AST Walker", gen_python_walker),
        ("Python AST Walker Nonrecursive", gen_python_walker_nr),
        ("Python Name Binder Propagate Current Scope", gen_python_name_binder),
    ]
    return generate(*sections)
0
def solve(opts, msg):
    """Run the daily beehive pipeline: generate, publish, solve, tweet.

    Parameters
    ----------
    opts : dict
        Flags: 'generate', 'publish', 'solution', 'twitter'.
    msg : str
        Optional message prepended to the published puzzle.

    NOTE(review): the branches share state (`letters`, `ALF`, `date`,
    `payload`, the locally imported `os`); running a later branch without
    the earlier ones raises NameError — behavior preserved as-is.
    """

    if opts['generate']:
        logging.info('Operation started: generation.')
        import sys
        sys.path.append('./lib')

        from generate import generate
        # Generate ONCE and reuse: the original called generate() twice, so
        # the ALF score belonged to a different puzzle than the letters.
        puzzle = generate()
        for count, each in enumerate(puzzle[0]):
            letters[count] = each[0]
        ALF = puzzle[1]

        print(letters)  # was Python 2 print statements
        print(ALF)
        logging.info('SUCCEEDED: puzzle generation.')

    if opts['publish']:

        # 140 character limit

        # 22 characters
        import time
        import os

        date = time.strftime("%x")
        header = 'Beehive for ' + date + ':' + '\n'

        # ASCII-art hive with the centre letter in the middle row.
        hive = [0] * 5
        hive[0] = '   \\   ' + str(letters[1]).upper() + '  /' + '\n'
        hive[1] = str(letters[2]).upper() + '   \\    /   ' + str(
            letters[3]).upper() + '\n'
        hive[2] = '---(  ' + str(letters[0]).upper() + '  )---' + '\n'
        hive[3] = str(letters[4]).upper() + '   /    \\   ' + str(
            letters[5]).upper() + '\n'
        hive[4] = '    /  ' + str(letters[6]).upper() + '   \\'

        footer = '\nALF: ' + str("%.2f" % ALF)

        if len(msg) > 1:
            top_message = msg
            payload = top_message + '\n' + header + hive[0] + hive[1] + hive[
                2] + hive[3] + hive[4] + footer
        else:
            payload = header + hive[0] + hive[1] + hive[2] + hive[3] + hive[
                4] + footer

        print(payload)
        logging.info('SUCCEEDED: Twitter publication.')

    if opts['solution']:
        logging.info('Operation started: solution generation.')

        letter_anag = ''
        for each in letters:
            letter_anag += each

        # Can you think of any word with the same letter appearing more than 5 times?
        letter_anag += letter_anag + letter_anag + letter_anag + letter_anag + letter_anag

        def anagramchk(word, chkword):
            # 1 if `word` can be spelled from the letters of `chkword`.
            for letter in word:
                if letter in chkword:
                    chkword = chkword.replace(letter, '', 1)
                else:
                    return 0
            return 1

        uncheck_words = []
        # The original nested `for line in f` twice, which silently skipped
        # the FIRST word of the list; a single pass checks every word.
        with open('./lib/wordlist.txt', 'r') as f:
            for line in f:
                word = line.strip()
                if anagramchk(word, letter_anag):
                    uncheck_words.append(word)

        logging.info('SUCCEEDED: Unchecked word solutions.')
        logging.info('Operation started: Checking word solutions for reqs.')

        # A valid word must contain the centre letter (letters[0]).
        words = []
        target_letter = set(letters[0])
        for each in uncheck_words:
            if target_letter & set(each):
                words.append(each)

        solutions_file = './solutions/beehive' + str(date.replace('/',
                                                                  '')) + '.txt'

        # One managed write replaces the original open/close + reopen-append.
        with open(solutions_file, 'w') as f:
            f.write('Daily Beehive solution for ' + date + ':\n')
            for each in words:
                f.write(each + '\n')

        logging.info('SUCCEEDED: Solution generation accounting for reqs.')

        logging.info('Operation started: Uploading solution via git')

        os.system('git add ' + solutions_file)
        os.system('git commit -m "added solutions file"')
        os.system('git push origin master')

        logging.info('SUCCEEDED: Uploaded solutions to GitHub.')

    if opts['twitter']:
        logging.info('Operation started: Twitter publication.')

        payload += '\nSolved: https://raw.githubusercontent.com/aaronsdevera/dailybeehive/master/prod/solutions/beehive' + str(
            date.replace('/', '')) + '.txt'

        import twitter
        from twitter_api_keys import KEYS

        api = twitter.Api(consumer_key=KEYS[0],
                          consumer_secret=KEYS[1],
                          access_token_key=KEYS[2],
                          access_token_secret=KEYS[3])

        status = api.PostUpdate(payload)
        logging.info('SUCCEEDED: Posted to Twitter.')
Ejemplo n.º 51
0
def generate_midi(config_folder, score_source, number_of_version, duration_gen,
                  rhythmic_reconstruction, logger_generate):
    """Generate the orchestration of a midi piano score.

    Parameters
    ----------
    config_folder : str
        Absolute path to the configuration folder, i.e. the folder containing
        the saved model and the results.
    score_source : str
        Either a path to a folder containing two midi files (piano and
        orchestration) or the path to a piano midi file.
    number_of_version : int
        Number of versions generated in a batch manner. Since the generation
        process involves sampling it might be interesting to generate several
        versions.
    duration_gen : int
        Length of the generated score (in number of events). Useful for
        generating only the beginning of the piece.
    rhythmic_reconstruction : bool
        Whether rhythmic reconstruction from event-level representation to
        frame-level representation is performed. If True, the rhythmic
        structure of the original piano score is used.
    logger_generate : logger
        Instantiation of logging. Can be None.
    """

    logger_generate.info("#############################################")
    logger_generate.info("Orchestrating piano score : " + score_source)
    ############################################################
    # Load model, config and data
    ############################################################

    ########################
    # Load config and model.
    # Context managers close the pickle files deterministically instead of
    # leaking the handles until garbage collection.
    with open(config_folder + '/script_parameters.pkl', 'rb') as pkl_file:
        parameters = pkl.load(pkl_file)
    with open(config_folder + '/model_params.pkl', 'rb') as pkl_file:
        model_parameters = pkl.load(pkl_file)
    # Set a minimum seed size, because for very short models you don't even
    # see the beginning
    seed_size = max(model_parameters['temporal_order'], 10) - 1
    quantization = parameters['quantization']
    temporal_granularity = parameters['temporal_granularity']
    instru_mapping = parameters['instru_mapping']
    ########################

    ########################
    # Load data: a bare midi file is treated as a piano solo, anything else
    # as a folder containing a (piano, orchestra) pair.
    if re.search(r'mid$', score_source):
        pr_piano, event_piano, duration_piano, name_piano, pr_orch, instru_orch, duration = load_solo(
            score_source, quantization, parameters["binarize_piano"],
            temporal_granularity)
    else:
        pr_piano, event_piano, duration_piano, name_piano, pr_orch, instru_orch, duration = load_from_pair(
            score_source, quantization, parameters["binarize_piano"],
            parameters["binarize_orch"], temporal_granularity)
    ########################

    ########################
    # Shorten
    # Keep only the beginning of the pieces (let's say a 100 events)
    pr_piano = extract_pianoroll_part(pr_piano, 0, duration_gen)
    if parameters["duration_piano"]:
        duration_piano = np.asarray(duration_piano[:duration_gen])
    else:
        duration_piano = None
    event_piano = event_piano[:duration_gen]
    pr_orch = extract_pianoroll_part(pr_orch, 0, duration_gen)
    ########################

    ########################
    # Instanciate piano pianoroll
    N_piano = instru_mapping['Piano']['index_max']
    pr_piano_gen = np.zeros((duration_gen, N_piano), dtype=np.float32)
    pr_piano_gen = build_data_aux.cast_small_pr_into_big_pr(
        pr_piano, {}, 0, duration_gen, instru_mapping, pr_piano_gen)
    # Indices of the events where the piano is silent (empty frames)
    pr_piano_gen_flat = pr_piano_gen.sum(axis=1)
    silence_piano = [
        e for e in range(duration_gen) if pr_piano_gen_flat[e] == 0
    ]
    ########################

    ########################
    # Instanciate orchestra pianoroll with orchestra seed
    N_orchestra = parameters['N_orchestra']
    if pr_orch:
        pr_orchestra_gen = np.zeros((seed_size, N_orchestra), dtype=np.float32)
        orch_seed_beginning = {k: v[:seed_size] for k, v in pr_orch.items()}
        pr_orchestra_gen = build_data_aux.cast_small_pr_into_big_pr(
            orch_seed_beginning, instru_orch, 0, seed_size, instru_mapping,
            pr_orchestra_gen)
        pr_orchestra_truth = np.zeros((duration_gen, N_orchestra),
                                      dtype=np.float32)
        pr_orchestra_truth = build_data_aux.cast_small_pr_into_big_pr(
            pr_orch, instru_orch, 0, duration_gen, instru_mapping,
            pr_orchestra_truth)
    else:
        # No ground-truth orchestration available (piano solo input)
        pr_orchestra_gen = None
        pr_orchestra_truth = None
    ########################

    #######################################
    # Embed piano
    time_embedding = time.time()
    if parameters['embedded_piano']:
        # Load model
        embedding_path = parameters["embedding_path"]
        embedding_model = embedDenseNet(380, 12, (1500, 500), 100, 1500, 2, 3,
                                        12, 0.5, 0, False, True)
        embedding_model.load_state_dict(torch.load(embedding_path))
        # Inference mode: without eval() the constructor's 0.5 dropout stays
        # active at generation time
        embedding_model.eval()

        # Build embedding (no need to batch here, len(pr_piano_gen) is sufficiently small)
        piano_resize_emb = np.zeros(
            (len(pr_piano_gen), 1, 128))  # Embeddings accept size 128 samples
        piano_resize_emb[:, 0, instru_mapping['Piano']['pitch_min']:
                         instru_mapping['Piano']['pitch_max']] = pr_piano_gen
        piano_resize_emb_TT = torch.tensor(piano_resize_emb)
        # no_grad: we only do inference, and Tensor.numpy() below raises on a
        # tensor that still requires grad
        with torch.no_grad():
            piano_embedded_TT = embedding_model(piano_resize_emb_TT.float(), 0)
        pr_piano_gen_embedded = piano_embedded_TT.numpy()
    else:
        pr_piano_gen_embedded = pr_piano_gen
    time_embedding = time.time() - time_embedding  # informational only
    #######################################

    ########################
    # Inputs' normalization
    with open(os.path.join(config_folder, 'normalizer.pkl'), 'rb') as pkl_file:
        normalizer = pkl.load(pkl_file)
    if parameters["embedded_piano"]:  # When using embedding, no normalization
        pr_piano_gen_norm = pr_piano_gen_embedded
    else:
        pr_piano_gen_norm = normalizer.transform(pr_piano_gen_embedded)
    ########################

    ########################
    # Store folder
    string = re.split(r'/', name_piano)[-1]
    name_track = re.sub('piano_solo.mid', '', string)
    generated_folder = config_folder + '/generation_reference_example/' + name_track
    if not os.path.isdir(generated_folder):
        os.makedirs(generated_folder)
    ########################

    ########################
    # Get trainer
    with open(os.path.join(config_folder, 'which_trainer'), 'r') as ff:
        which_trainer = ff.read()
    # Trainer
    if which_trainer == 'standard_trainer':
        from LOP.Scripts.standard_learning.standard_trainer import Standard_trainer as Trainer
        kwargs_trainer = {'temporal_order': model_parameters["temporal_order"]}
    elif which_trainer == 'NADE_trainer':
        from LOP.Scripts.NADE_learning.NADE_trainer import NADE_trainer as Trainer
        kwargs_trainer = {
            'temporal_order': model_parameters["temporal_order"],
            'num_ordering': model_parameters["num_ordering"]
        }
    else:
        raise Exception("Undefined trainer")
    trainer = Trainer(**kwargs_trainer)
    ########################

    ############################################################
    # Generate
    ############################################################
    time_generate_0 = time.time()
    generated_sequences = {}
    # One generation batch per saved measure (one saved model per measure)
    for measure_name in parameters['save_measures']:
        model_path = 'model_' + measure_name
        generated_sequences[measure_name] = generate(
            trainer,
            pr_piano_gen_norm,
            silence_piano,
            duration_piano,
            config_folder,
            model_path,
            pr_orchestra_gen,
            batch_size=number_of_version)
    time_generate_1 = time.time()
    logger_generate.info(
        'TTT : Generating data took {} seconds'.format(time_generate_1 -
                                                       time_generate_0))

    ############################################################
    # Reconstruct and write
    ############################################################
    def reconstruct_write_aux(generated_sequences, prefix):
        # Write one midi file per generated version in the batch
        for write_counter in range(generated_sequences.shape[0]):
            # To distinguish when seed stop, insert a sustained note
            this_seq = generated_sequences[write_counter] * 127
            this_seq[:seed_size, 0] = 20
            # Reconstruct
            if rhythmic_reconstruction:
                pr_orchestra_clean = from_event_to_frame(this_seq, event_piano)
            else:
                pr_orchestra_clean = this_seq
            pr_orchestra = instrument_reconstruction(pr_orchestra_clean,
                                                     instru_mapping)
            # Write
            write_path = generated_folder + '/' + prefix + '_' + str(
                write_counter) + '_generated.mid'
            if rhythmic_reconstruction:
                write_midi(pr_orchestra, quantization, write_path, tempo=80)
            else:
                write_midi(pr_orchestra, 1, write_path, tempo=80)
        return

    for measure_name in parameters["save_measures"]:
        reconstruct_write_aux(generated_sequences[measure_name], measure_name)

    ############################################################
    ############################################################
    # Write original orchestration and piano scores, but reconstructed version, just to check
    if rhythmic_reconstruction:
        A = from_event_to_frame(pr_piano_gen, event_piano)
    else:
        A = pr_piano_gen
    B = A * 127  # scale binary/normalized roll to midi velocities
    piano_reconstructed = instrument_reconstruction_piano(B, instru_mapping)
    write_path = generated_folder + '/piano_reconstructed.mid'
    if rhythmic_reconstruction:
        write_midi(piano_reconstructed, quantization, write_path, tempo=80)
    else:
        write_midi(piano_reconstructed, 1, write_path, tempo=80)
    #
    # Guard: for a piano-solo input there is no ground-truth orchestration
    # (pr_orchestra_truth is None) — the original code crashed here on
    # `None * 127`.
    if pr_orchestra_truth is not None:
        if rhythmic_reconstruction:
            A = from_event_to_frame(pr_orchestra_truth, event_piano)
        else:
            A = pr_orchestra_truth
        B = A * 127
        orchestra_reconstructed = instrument_reconstruction(B, instru_mapping)
        write_path = generated_folder + '/orchestra_reconstructed.mid'
        if rhythmic_reconstruction:
            write_midi(orchestra_reconstructed, quantization, write_path, tempo=80)
        else:
            write_midi(orchestra_reconstructed, 1, write_path, tempo=80)
def main():
    """Run the exception-factory code generators and return the result."""
    sections = (
        ("Exception Factory", gen_expr_factory_core),
        ("Com Exception Factory", gen_expr_factory_com),
        ("Microsoft.Scripting Exception Factory", gen_expr_factory_scripting),
    )
    return generate.generate(*sections)
Ejemplo n.º 53
0
def main():
    """Run the math-functions code generator and return the result."""
    section = ("math functions", gen_funcs)
    return generate(section)
Ejemplo n.º 54
0
    alg_pars = {}
    alg_pars['densest_eps'] = 0.1
    alg_pars['dp_eps'] = 0.1

    res = np.empty((len(innerdegree_range), len(noise_range), 9))

    for j in xrange(len(innerdegree_range)):
        generator_pars['innerdegree'] = innerdegree_range[j]
        print 'inner noise (avg. degree of the planted graph):', innerdegree_range[
            j]
        for i in xrange(len(noise_range)):
            print 'outer noise (avg. degree of the background network):', noise_range[
                i]
            noise = noise_range[i]
            generator_pars['noise'] = noise_range[i]
            TS, backNoise, innerNoise, generated_C = generate.generate(
                generator_pars)

            st = time.time()
            ADP = ApproxDP(alg_pars['dp_eps'], generator_pars['k'], TS)
            ADP.run_DP(alg_pars['densest_eps'])
            el_time = time.time() - st
            graphs, densities, intervals = ADP.get_sol_graphs()
            nq = get_node_quality(graphs, generated_C)
            iq = get_interval_quality(intervals, generated_C)
            res[j, i, 0:3] = nq
            res[j, i, 3:6] = iq
            res[j, i, 6] = sum(densities)
            print 'total density', res[j, i, 6]
            print
            res[j, i, 7] = get_true_density(generated_C)
            res[j, i, 8] = el_time
Ejemplo n.º 55
0
from generate import generate

file_name = "outofsample1.csv"
# file_name = "oos_w_fund_mom.csv"
# Run the generator for each out-of-sample year, 2008 through 2012 inclusive
for year in range(2008, 2013):
    generate(file_name, year)
Ejemplo n.º 56
0
def main():
    """Run the TypeCache code generators and return the result."""
    sections = (
        ("TypeCache Storage", gen_typecache_storage),
        ("TypeCache Entries", gen_typecache),
    )
    return generate(*sections)
def generate_midi(config_folder_fd, config_folder_bd, score_source,
                  save_folder, initialization_type, number_of_version,
                  duration_gen, number_fdbd_pass, logger_generate):
    """Orchestrate a midi piano score with alternating forward/backward passes.

    Parameters
    ----------
    config_folder_fd : str
        Absolute path to the configuration folder of the forward model
        (saved model and results).
    config_folder_bd : str
        Absolute path to the configuration folder of the backward model.
    score_source : str
        Either a path to a folder containing two midi files (piano and
        orchestration) or the path to a piano midi file.
    save_folder : str
        Root folder under which the generated midi files are written.
    initialization_type : str
        How the orchestra seed is initialized: "seed" (ground-truth
        beginning), "zeros", "constant" or "random".
    number_of_version : int
        Number of versions generated in a batch manner. Since the generation
        process involves sampling it might be interesting to generate several
        versions.
    duration_gen : int
        Length of the generated score (in number of events). Useful for
        generating only the beginning of the piece.
    number_fdbd_pass : int
        Number of forward/backward generation passes over the piece.
    logger_generate : logger
        Instantiation of logging. Can be None.
    """

    logger_generate.info("#############################################")
    logger_generate.info("Orchestrating : " + score_source)

    # Load parameters.
    # Context managers close the pickle files deterministically instead of
    # leaking the handles until garbage collection.
    with open(config_folder_fd + '/script_parameters.pkl', 'rb') as pkl_file:
        parameters = pkl.load(pkl_file)
    with open(config_folder_fd + '/model_params.pkl', 'rb') as pkl_file:
        model_parameters_fd = pkl.load(pkl_file)
    with open(config_folder_bd + '/script_parameters.pkl', 'rb') as pkl_file:
        parameters_bd = pkl.load(pkl_file)
    with open(config_folder_bd + '/model_params.pkl', 'rb') as pkl_file:
        model_parameters_bd = pkl.load(pkl_file)

    # The forward and backward configurations must agree on everything that
    # shapes the data representation
    assert (model_parameters_fd["temporal_order"] ==
            model_parameters_bd["temporal_order"]
            ), "The two model have different seed_size"
    assert (parameters["quantization"] == parameters_bd["quantization"]
            ), "The two model have different quantization"
    assert (parameters["temporal_granularity"] ==
            parameters_bd["temporal_granularity"]
            ), "The two model have different temporal_granularity"
    assert (parameters["instru_mapping"] == parameters_bd["instru_mapping"]
            ), "The two model have different instru_mapping"
    assert (parameters["normalizer"] == parameters_bd["normalizer"]
            ), "The two model have different normalizer"

    # Set a minimum seed size, because for very short models you don't even
    # see the beginning
    seed_size = max(model_parameters_fd['temporal_order'], 10) - 1

    #######################
    # Load data: a bare midi file is treated as a piano solo; otherwise a
    # (piano, orchestra) pair, aligned only when the "seed" initialization
    # needs the ground-truth beginning.
    if re.search(r'mid$', score_source):
        pr_piano, event_piano, duration_piano, name_piano, pr_orch, instru_orch, duration = generation_utils.load_solo(
            score_source, parameters["quantization"],
            parameters["binarize_piano"], parameters["temporal_granularity"])
    else:
        pr_piano, event_piano, duration_piano, name_piano, pr_orch, instru_orch, duration = generation_utils.load_from_pair(
            score_source,
            parameters["quantization"],
            parameters["binarize_piano"],
            parameters["binarize_orch"],
            parameters["temporal_granularity"],
            align_bool=(initialization_type == "seed"))

    if (duration is None) or (duration < duration_gen):
        logger_generate.info("Track too short to be used")
        return
    ########################

    ########################
    # Shorten
    # Keep only the beginning of the pieces (let's say a 100 events)
    pr_piano = pianoroll_processing.extract_pianoroll_part(
        pr_piano, 0, duration_gen)
    if parameters["duration_piano"]:
        duration_piano = np.asarray(duration_piano[:duration_gen])
    else:
        duration_piano = None
    if parameters["temporal_granularity"] == "event_level":
        event_piano = event_piano[:duration_gen]
    pr_orch = pianoroll_processing.extract_pianoroll_part(
        pr_orch, 0, duration_gen)
    ########################

    ########################
    # Instanciate piano pianoroll
    N_piano = parameters["instru_mapping"]['Piano']['index_max']
    pr_piano_gen = np.zeros((duration_gen, N_piano), dtype=np.float32)
    pr_piano_gen = build_data_aux.cast_small_pr_into_big_pr(
        pr_piano, {}, 0, duration_gen, parameters["instru_mapping"],
        pr_piano_gen)
    # Indices of the events where the piano is silent (empty frames)
    pr_piano_gen_flat = pr_piano_gen.sum(axis=1)
    silence_piano = [
        e for e in range(duration_gen) if pr_piano_gen_flat[e] == 0
    ]
    ########################

    ########################
    # Initialize orchestra pianoroll with orchestra seed (choose one)
    N_orchestra = parameters['N_orchestra']
    pr_orchestra_truth = np.zeros((duration_gen, N_orchestra),
                                  dtype=np.float32)
    pr_orchestra_truth = build_data_aux.cast_small_pr_into_big_pr(
        pr_orch, instru_orch, 0, duration_gen, parameters["instru_mapping"],
        pr_orchestra_truth)
    if initialization_type == "seed":
        pr_orchestra_seed = generation_utils.init_with_seed(
            pr_orch, number_of_version, seed_size, N_orchestra, instru_orch,
            parameters["instru_mapping"])
    elif initialization_type == "zeros":
        pr_orchestra_seed = generation_utils.init_with_zeros(
            number_of_version, seed_size, N_orchestra)
    elif initialization_type == "constant":
        const_value = 0.1
        pr_orchestra_seed = generation_utils.init_with_constant(
            number_of_version, seed_size, N_orchestra, const_value)
    elif initialization_type == "random":
        proba_activation = 0.5
        pr_orchestra_seed = generation_utils.init_with_random(
            number_of_version, seed_size, N_orchestra, proba_activation)
    else:
        # Fail fast instead of hitting a NameError on pr_orchestra_seed later
        raise ValueError(
            "Unknown initialization_type: {}".format(initialization_type))
    ########################

    #######################################
    # Embed piano
    time_embedding = time.time()
    if parameters['embedded_piano']:
        # Load model
        embedding_path = parameters["embedding_path"]
        embedding_model = torch.load(embedding_path, map_location="cpu")
        # Inference mode: disables dropout/batchnorm training behavior
        embedding_model.eval()

        # Build embedding (no need to batch here, len(pr_piano_gen) is sufficiently small)
        # Plus no CUDA here because : afraid of mix with TF  +  possibly very long piano chunks
        piano_resize_emb = np.zeros(
            (len(pr_piano_gen), 1, 128))  # Embeddings accept size 128 samples
        piano_resize_emb[:, 0, parameters["instru_mapping"]['Piano']
                         ['pitch_min']:parameters["instru_mapping"]['Piano']
                         ['pitch_max']] = pr_piano_gen
        piano_resize_emb_TT = torch.tensor(piano_resize_emb)
        # no_grad: we only do inference, and Tensor.numpy() below raises on a
        # tensor that still requires grad
        with torch.no_grad():
            piano_embedded_TT = embedding_model(piano_resize_emb_TT.float(), 0)
        pr_piano_gen_embedded = piano_embedded_TT.numpy()
    else:
        pr_piano_gen_embedded = pr_piano_gen
    time_embedding = time.time() - time_embedding  # informational only
    #######################################

    ########################
    # Inputs' normalization (the fd and bd normalizers are asserted equal
    # above, so loading the fd one is enough)
    with open(os.path.join(config_folder_fd, 'normalizer.pkl'), 'rb') as pkl_file:
        normalizer = pkl.load(pkl_file)
    if parameters["embedded_piano"]:  # When using embedding, no normalization
        pr_piano_gen_norm = pr_piano_gen_embedded
    else:
        pr_piano_gen_norm = normalizer.transform(pr_piano_gen_embedded)
    ########################

    ########################
    # Store folder
    string = re.split(r'/', name_piano)[-1]
    name_track = re.sub('piano_solo.mid', '', string)
    generated_folder = save_folder + '/fd_bd_' + initialization_type + '_init/' + name_track
    if not os.path.isdir(generated_folder):
        os.makedirs(generated_folder)
    ########################

    ########################
    # Get trainer
    with open(os.path.join(config_folder_fd, 'which_trainer'), 'r') as ff:
        which_trainer_fd = ff.read()
    # Trainer
    trainer_fd = import_trainer(which_trainer_fd, model_parameters_fd,
                                parameters)
    #
    with open(os.path.join(config_folder_bd, 'which_trainer'), 'r') as ff:
        which_trainer_bd = ff.read()
    # Trainer
    trainer_bd = import_trainer(which_trainer_bd, model_parameters_bd,
                                parameters)
    ########################

    ############################################################
    # Generate
    ############################################################
    time_generate_0 = time.time()
    for pass_index in range(number_fdbd_pass):
        model_path = 'model_accuracy'
        # Forward pass: orchestrate left-to-right from the current seed
        pr_orchestra_gen = generate(trainer_fd,
                                    pr_piano_gen_norm,
                                    silence_piano,
                                    duration_piano,
                                    config_folder_fd,
                                    model_path,
                                    pr_orchestra_seed,
                                    batch_size=number_of_version)
        # The end of the forward generation seeds the backward pass
        pr_orchestra_seed = pr_orchestra_gen[:, -seed_size:]
        prefix_name = 'fd_' + str(pass_index) + '_'
        generation_utils.reconstruct_generation(pr_orchestra_gen, event_piano,
                                                generated_folder, prefix_name,
                                                parameters, seed_size)
        # Backward pass: orchestrate right-to-left
        pr_orchestra_gen = generate_backward(trainer_bd,
                                             pr_piano_gen_norm,
                                             silence_piano,
                                             duration_piano,
                                             config_folder_bd,
                                             model_path,
                                             pr_orchestra_seed,
                                             batch_size=number_of_version)
        # The beginning of the backward generation seeds the next forward pass
        pr_orchestra_seed = pr_orchestra_gen[:, :seed_size]
        prefix_name = 'bd_' + str(pass_index) + '_'
        generation_utils.reconstruct_generation(pr_orchestra_gen, event_piano,
                                                generated_folder, prefix_name,
                                                parameters, seed_size)

    time_generate_1 = time.time()
    logger_generate.info(
        'TTT : Generating data took {} seconds'.format(time_generate_1 -
                                                       time_generate_0))

    ############################################################
    # Reconstruct and write
    ############################################################
    # NOTE(review): if number_fdbd_pass is 0, pr_orchestra_gen is never bound
    # and this raises — confirm callers always pass number_fdbd_pass >= 1.
    prefix_name = 'final_'
    generation_utils.reconstruct_generation(pr_orchestra_gen, event_piano,
                                            generated_folder, prefix_name,
                                            parameters, seed_size)
    generation_utils.reconstruct_original(pr_piano_gen, pr_orchestra_truth,
                                          event_piano, generated_folder,
                                          parameters)
    return
Ejemplo n.º 58
0
        input_size = vocab.size
        output_size = vocab.size

    model = LanguageModel('RNN', input_size, hidden_size, output_size)

    # create criterion and optimiser
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=0.001)

    # training loop
    every_n = int(total_n / 100)
    running_loss = 0
    losses = []
    for i, (batch, labels) in enumerate(
            generate.generate('train',
                              token=token,
                              max_len=max_len,
                              small=small)):

        # one hot encode
        if token == 'character':
            batch = generate.one_hot_encode(batch, vocab)
        # or embed
        elif token == 'word':
            batch = generate.w2v_encode(batch, emb, vocab)

        # turn into torch tensors
        batch = torch.Tensor(batch)
        labels = torch.Tensor(labels).long()

        # zero the gradients
        optimizer.zero_grad()
Ejemplo n.º 59
0
def update_index():
    """Rebuild the generated index by running the site generator."""
    # Imported lazily so the module is only loaded when an update is requested
    from generate import generate as run_generator
    run_generator()
Ejemplo n.º 60
0
from generate import generate
import os
import subprocess
import sys

# Ensure an empty build directory: create it if missing, otherwise clear
# every file it contains (subdirectories themselves are kept).
if not os.path.exists("build"):
    os.mkdir("build")
else:
    for root, _, files in os.walk("build"):
        for name in files:
            os.remove(os.path.join(root, name))
# Optionally regenerate the SHA1 checking code before compiling
if "--generate" in sys.argv:
    print("Generating SHA1 Checking Code...")
    generate()
# Remember the original working directory so we can restore it on failure
tmp = os.getcwd()
os.chdir("build")
if ("--gpu" in sys.argv):
    if ("--release" in sys.argv):
        if (subprocess.run([
                "nvcc", "-O3", "-Xptxas", "-v,-O3", "-use_fast_math", "-D",
                "_NVCC", "-D", "NDEBUG", "-D", "_WINDOWS", "-D", "_UNICODE",
                "-D", "UNICODE", "-o", "gpu_cracker.exe",
                "../src/gpu_cracker/gpu_cracker.cu", "-I",
                "../src/gpu_cracker/include"
        ]).returncode != 0):
            os.chdir(tmp)
            sys.exit(1)
    else:
        if (subprocess.run([
                "nvcc", "-G", "-use_fast_math", "-D", "_NVCC", "-D", "_DEBUG",
                "-D", "_WINDOWS", "-D", "_UNICODE", "-D", "UNICODE", "-o",