Example #1
def convertipv4(mysql_object, college_tablename=None, num_config=None, start_id=None, college_filename=None, correct_filename=None):
    """
    @description: split the addresses in the Chunzhen (CZ88) IP database into
        province, city, and district.
    ---------
    @params: num_config: number of IP records to process per batch, default 20000.
             start_id: index of the first IP record to process, default 1.
             college_tablename: name of the university-info table in the MySQL IP
                 database, default "college_info".
             college_filename: JSON filename or path for the university data
                 output, default "./tmp/college.json".
             correct_filename: JSON filename or path of the custom correction
                 file, default "../data/correct.json".
    -------
    @Returns: None
    -------
    """
    if num_config is None:
        num_config = 20000
    if start_id is None:
        start_id = 1
    if college_tablename is None:
        college_tablename = 'college_info'
    if college_filename is None:
        college_filename = os.path.abspath(tmp_dir + os.path.sep + "college.json")
    if correct_filename is None:
        correct_filename = os.path.abspath(data_dir + os.path.sep + "correct.json")
        file_set(correct_filename)
    
    convert(mysql_object,college_tablename,num_config,start_id,college_filename,correct_filename)
Example #2
def initial():
    run_command("mkdir " + universal.pdf_folder)
    run_command("mkdir " + universal.tag_folder)
    temp = universal.filename  #assigning filename to temp
    no_of_pages = burstpdf()
    logwriter.logwrite("\n********" + "\n" + temp + "\n*************\n")
    if no_of_pages == 0:
        logwriter.logwrite("No pages in this pdf\n")
        logwriter.logwrite("********" + "\n" + temp + "\n*************\n")
        return 0
    i = 0
    excelwriter.init()
    while i < no_of_pages:  #loop for locating first patent file
        universal.filename = str(i)
        convert.convert()  #for initializing conversion of files
        i += 1
        if parser.begin() != -1:
            excelwriter.loop()
            mysql.loop()
            break
    universal.flag = 1  #Process of extraction will start
    print(universal.con)
    while i < no_of_pages:
        universal.filename = str(i)
        convert.convert()  #for initializing conversion of files
        if parser.begin() == -1:
            i += 1
            continue
        excelwriter.loop()
        mysql.loop()
        i += 1
    universal.workbook.close()
    run_command("rm -r " + universal.pdf_folder)
    run_command("rm -r " + universal.tag_folder)
    logwriter.logwrite("********" + "\n" + temp + "\n*************\n")
Example #3
def add_single(conn, pathfile):
    """ add a single song to database """
    if pathfile.endswith(".mp3"):
        # read metadata
        tup = c.meta(pathfile)
        d.add_song(tup, conn)
        log.info('metadata recorded in the database')
        # convert mp3 to wav
        c.convert(pathfile)
        log.info('audio converted to wav')
        # read the wav from local directory
        filename = os.path.basename(pathfile)
        pathwav = "./music/wav/" + filename[:-3] + "wav"
        # compute spectrogram and fingerprints
        framerate, f, t, spect = a.spectrogram(pathwav)
        fingerprints1 = a.fingerprint(f, spect)
        fingerprints2 = a.fingerprint2(f, spect, framerate)
        song_id = d.select_songid(filename, conn)
        log.info('audio file no. %s recorded in the database', song_id)
        # add fingerprints to database
        d.add_fingerprint(filename, t, fingerprints1, fingerprints2, conn)
        # update fingerprinted status
        d.update_fingerprinted(song_id, conn)

        print('Done!', filename, 'added to your database ❤')
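A standalone sketch of the wav-path derivation above, using os.path.splitext instead of slicing off the last three characters (the directory layout is the project's assumption):

import os

pathfile = "./music/mp3/song.mp3"
filename = os.path.basename(pathfile)
pathwav = os.path.join("./music/wav", os.path.splitext(filename)[0] + ".wav")
print(pathwav)  # ./music/wav/song.wav on POSIX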
Example #4
    def copyconvert(line):
        fp = line.strip()
        # skip the current file if its path contains any of the given substrings
        skip = any(s in fp for s in skips)

        if not skip:
            logging.debug(f'Attempting to transfer {fp}')
            ori_fp = fp
            name, ext = os.path.splitext(fp)
            ori_ext = ext

            if ori_ext[1:] in conversions:
                ext = '.' + conversions[ori_ext[1:]]

            rel_path = os.path.relpath(name + ext, src)
            abs_path = os.path.join(dest, rel_path)

            # skip file if it exists in destination folder
            if os.path.exists(abs_path):
                print(f'{abs_path} already exists!')
                return

            if ori_ext[1:] in conversions:
                print(f'Converting {fp} to {ext[1:]}')
                convert(fp, ext[1:])
                print(f'Converted {fp} to {ext[1:]}')

            # create folders if they don't exist
            os.makedirs(os.path.dirname(abs_path), exist_ok=True)

            # copy file
            copyfile(fp, abs_path)
            logging.debug(f'Transferred {ori_fp} to {abs_path}')
            success.append(abs_path)
Example #5
def init_config(config, defaults, version, specs):

    if len(config.config) == 0:
        config.config.update(copy.deepcopy(defaults))
        set_version(config, version)

    file_ver = get_version(config)
    ver = file_ver
    while ver != version:
        if ver < version:
            key = (ver, ver + 1)
        else:
            key = (ver, ver - 1)

        spec = specs.get(key)
        if spec:
            convert(spec, config)
            ver = get_version(config)
        else:
            raise ValueError(
                "Config file conversion v%s -> v%s not supported" %
                (file_ver, version))

    # copy the keys so entries can be deleted while iterating
    for key in list(config.config.keys()):
        if key not in defaults:
            del config.config[key]

    return file_ver
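The while loop above walks the config version one step at a time through the conversion specs. A self-contained sketch of that pattern, with a made-up spec format (simple key renames) standing in for the real conversion specs:

def apply_spec(spec, config):
    # rename keys, then bump the stored version
    for old_key, new_key in spec["renames"].items():
        config[new_key] = config.pop(old_key)
    config["version"] = spec["to"]

specs = {
    (1, 2): {"to": 2, "renames": {"addr": "address"}},
    (2, 3): {"to": 3, "renames": {"address": "host"}},
}

config = {"version": 1, "addr": "127.0.0.1"}
while config["version"] != 3:
    spec = specs[(config["version"], config["version"] + 1)]
    apply_spec(spec, config)

assert config == {"version": 3, "host": "127.0.0.1"}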
Example #6
def train(logdir1='logdir/default/train1', logdir2='logdir/default/train2', queue=True):
    model = Model(mode="train2", batch_size=hp.Train2.batch_size, queue=queue)

    # Loss
    loss_op = model.loss_net2()

    # Training Scheme
    global_step = tf.Variable(0, name='global_step', trainable=False)

    optimizer = tf.train.AdamOptimizer(learning_rate=hp.Train2.lr)
    with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
        var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'net/net2')
        train_op = optimizer.minimize(loss_op, global_step=global_step, var_list=var_list)

    # Summary
    summ_op = summaries(loss_op)

    session_conf = tf.ConfigProto(
        gpu_options=tf.GPUOptions(
            allow_growth=True,
            per_process_gpu_memory_fraction=0.6,
        ),
    )
    # Training
    with tf.Session(config=session_conf) as sess:
        # Load trained model
        sess.run(tf.global_variables_initializer())
        model.load(sess, mode='train2', logdir=logdir1, logdir2=logdir2)

        writer = tf.summary.FileWriter(logdir2, sess.graph)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)

        for epoch in range(1, hp.Train2.num_epochs + 1):
            for step in tqdm(range(model.num_batch), total=model.num_batch, ncols=70, leave=False, unit='b'):
                if queue:
                    sess.run(train_op)
                else:
                    mfcc, spec, mel = get_batch(model.mode, model.batch_size)
                    sess.run(train_op, feed_dict={model.x_mfcc: mfcc, model.y_spec: spec, model.y_mel: mel})

            # Fetch summaries every epoch; checkpoints are written every save_per_epoch epochs
            summ, gs = sess.run([summ_op, global_step])

            if epoch % hp.Train2.save_per_epoch == 0:
                tf.train.Saver().save(sess, '{}/epoch_{}_step_{}'.format(logdir2, epoch, gs))

                # Eval at every n epochs
                with tf.Graph().as_default():
                    eval2.eval(logdir2, queue=False)

                # Convert at every n epochs
                with tf.Graph().as_default():
                    convert.convert(logdir2, queue=False)

            writer.add_summary(summ, global_step=gs)

        writer.close()
        coord.request_stop()
        coord.join(threads)
Example #7
    def __offset(self, text):
        direct = self.__direct_offset(text)
        offset_type = 'l'
        offset_delta = 0L
        offset_relatif = 0L

        # Get the offset information
        if direct:
            offset_delta = convert.convert(text)
        else:
            match_abs = re.compile(self.se_offset_abs).match(text)
            match_add = re.compile(self.se_offset_add).match(text)

            if match_abs:
                offset_relatif = convert.convert(match_abs.group(1))

                if match_abs.group(2) is not None:
                    offset_type = match_abs.group(2)[1]

            elif match_add:
                offset_relatif = convert.convert(match_add.group(1))

                if match_add.group(2) is not None:
                    offset_type = match_add.group(2)[1]

                if match_add.group(3) == '-':
                    offset_delta = 0L - convert.convert(match_add.group(4))
                else:
                    offset_delta = convert.convert(match_add.group(4))

        return (direct, offset_type, offset_delta, offset_relatif)
Example #8
def agent(obs, config):
    # read (obs, config) into internal game state object
    state = State(obs, config)

    # actions object stores a list of pending ships/yards. as we decide on
    # actions, we remove the ships/yards from the pending lists and store
    # them in a dictionary together with their actions
    actions = Actions(state)

    # convert appropriate ships into yards
    convert(state, actions)

    # plan where we want to spawn new ships
    spawns = Spawns(state, actions)

    # place bounties on selected opponent ships/yards and remember
    # which ships we set bounties on for the future
    global ship_target_memory
    bounties = Bounties(state, ship_target_memory)
    ship_target_memory = bounties.target_list

    # set destinations for ships and rank moves by how much closer
    # we get to the destinations
    targets = Targets(state, actions, bounties, spawns)

    # decide on moves for ships
    move(state, actions, targets)

    # spawn the new ships at unoccupied shipyards
    spawns.spawn(state, actions)

    return actions.asdict()
Example #9
def download():
    #Downloads csd17 files and converts to vector representation

    #Connect to big data server and change working directory
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect(myHostname, username=myUsername, password=myPassword)
    sftp = ssh.open_sftp()
    sftp.chdir('../../data5/camdaNonC/')

    #Get files
    paths = get_remote_path(sftp)

    localpath = 'vectored_data/temp.fastq'
    for path in paths:
        if not already_converted(path[-13:-6]):
            #Download file
            sftp.get(path, localpath)

            #Convert temp file to vectored representation
            convert.convert(path[-13:-6])

            #Delete temp file
            os.remove("vectored_data/temp.fastq")
            os.remove("vectored_data/temp.fasta")
        #end
    #end

    sftp.close()
    ssh.close()  
Example #10
def main():
    load_dotenv()

    url = os.environ["GIST_URL"]
    res = requests.get(url)
    gist_extension_set = set(
        [j["metadata"]["publisherId"] for j in res.json()])

    with open("extensions.json", "r") as j_r:
        json_file_load = json.load(j_r)
    file_extension_set = set(flatten(json_file_load.values()))

    rm_extensions = file_extension_set - gist_extension_set
    add_extensions = gist_extension_set - file_extension_set

    content = {
        key: create_extensions_array(key, value, rm_extensions,
                                     (add_extensions if key == "unknown" else
                                      ()))
        for key, value in json_file_load.items()
    }

    # Even when the file contents are unchanged, growing extension download
    # counts would create a spurious diff, so return explicitly here.
    if len(rm_extensions) == 0 and len(add_extensions) == 0:
        return

    convert()
    with open("extensions.json", "w") as j_w:
        json.dump(content, j_w, indent=2)
        # Add a trailing newline to avoid a diff against files updated locally.
        j_w.write("\n")
Example #11
def main(fl):
    filenames = fl  #16.1.1
    with open(filenames) as f:
        reader = csv.reader(f)
        header_row = next(reader)
        print(header_row)

        datas, highs, lows = [], [], []
        for row in reader:
            try:
                current_date = datetime.strptime(row[0], "%Y-%m-%d")
                high = convert(float(row[1]))
                low = convert(float(row[3]))

            except ValueError:
                print(row[0], 'missing data')

            else:
                datas.append(current_date)
                highs.append(high)
                lows.append(low)

    title = []
    title.append(datas)
    title.append(highs)
    title.append(lows)
    return title
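A runnable sketch of the try/except/else row-parsing pattern used above, with inline sample rows standing in for the CSV file:

from datetime import datetime

rows = [["2018-01-01", "61", "", "45"], ["2018-01-02", "", "", ""]]
dates, highs, lows = [], [], []
for row in rows:
    try:
        day = datetime.strptime(row[0], "%Y-%m-%d")
        high = float(row[1])
        low = float(row[3])
    except ValueError:
        print(row[0], 'missing data')
    else:
        dates.append(day)
        highs.append(high)
        lows.append(low)

print(len(dates))  # 1: the second row is skipped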
Example #12
 def save_new_message(self, message):
     while not self.is_okay_to_work():
         time.sleep(10)	
     convert.convert(self.OUTPUT_DIR)
     self.MessageList.append(message)
     if int(message['msg_id']) > self.latest_id:
         self.latest_id = int(message['msg_id'])
     self.write_message_data()
Example #13
def img_actions(keyword):
    img_dir = os.path.dirname(
        os.path.abspath(__file__)) + "/downloads/" + keyword
    count = 250
    download.download(keyword, count, img_dir)
    rename.rename(keyword, img_dir)
    convert.convert(img_dir)
    convert.downscale(img_dir)
Example #14
def main(argv=None):
    logging_level = logging.WARNING
    filter_port = -1
    if argv is None:
        argv = sys.argv
    filenames = []
    idx = 1
    while idx < len(argv):
        if argv[idx] == '-h' or argv[idx] == '--help':
            PrintUsage()
            return 0
        elif argv[idx] == '--port':
            idx += 1
            if idx >= len(argv):
                PrintUsage()
                return 1
            filter_port = int(argv[idx])
        elif argv[idx] == '-ld':
            logging_level = logging.DEBUG
        elif argv[idx] == '-li':
            logging_level = logging.INFO
        elif argv[idx] == '-lw':
            logging_level = logging.WARN
        elif argv[idx] == '-le':
            logging_level = logging.ERROR
        elif argv[idx][0:1] == '-':
            print "Unknow option:", argv[idx]
            PrintUsage()
            return 1
        else:
            filenames.append(argv[idx])
        idx += 1

    # set the logging level
    logging.basicConfig(level=logging_level)

    if len(filenames) == 1:
        pcap_file = filenames[0]
        har_file = pcap_file + ".har"
    elif len(filenames) == 2:
        pcap_file = filenames[0]
        har_file = filenames[1]
    else:
        PrintUsage()
        return 1

    # If an exception is raised, do not catch it; let the program terminate.
    inf = open(pcap_file, 'rb')
    pcap_in = inf.read()
    inf.close()
    har_out = StringIO.StringIO()
    options = convert.Options()
    options.remove_cookie = False
    convert.convert(pcap_in, har_out, options)
    har_out_str = har_out.getvalue()
    outf = open(har_file, 'w')
    outf.write(har_out_str)
    outf.close()
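The hand-rolled while loop above predates argparse; a sketch of equivalent flag handling (the option names mirror the original, the rest is an assumption):

import argparse
import logging

p = argparse.ArgumentParser()
p.add_argument('--port', type=int, default=-1)
p.add_argument('-ld', dest='level', action='store_const',
               const=logging.DEBUG, default=logging.WARNING)
p.add_argument('files', nargs='+')

args = p.parse_args(['-ld', '--port', '80', 'trace.pcap'])
assert args.level == logging.DEBUG and args.port == 80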
Example #15
def get_converted_audio(user_id, user_audio_path, org_audio_path,
                        start_transcript, end_transcript):  # run the functions below in one go
    editconfig.speaker_json(user_audio_path, org_audio_path)
    editconfig.train_json(user_audio_path)
    editconfig.test_json(org_audio_path)
    editconfig.synthesis_json(user_id, org_audio_path, start_transcript,
                              end_transcript)
    convert.convert()
    hydra._internal.hydra.GlobalHydra().clear()
Example #16
def audioIngest():
    #control that all paths exist
    controlPaths()
    #copy the files to the temporary location; call copyFiles() once and
    #reuse the result instead of running the copy twice
    result = copyFiles()
    #once it's done, convert the files (copyFiles() presumably returns a
    #falsy value on success)
    if not result:
        convert()
        addToLibrary()
Example #17
def wire_convert_file(filename, data):
    if not data:
        return
    with open(filename, 'a') as f:
        if isinstance(data, list):
            f.writelines(["%s\n" % convert.convert(item) for item in data])
        elif isinstance(data, dict):
            line = convert.convert(data) + '\n'
            f.write(line)
    return
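A standalone sketch of the append-one-line-per-record pattern, with json.dumps standing in for the project's convert.convert:

import json
import os
import tempfile

def to_line(item):  # stand-in converter; the real one is project-specific
    return json.dumps(item)

records = [{"id": 1}, {"id": 2}]
path = os.path.join(tempfile.gettempdir(), "wire.log")
with open(path, 'a') as f:
    f.writelines("%s\n" % to_line(item) for item in records)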
Example #18
def main():
    if len(sys.argv) != 3:
        sys.stderr.write("usage: %s input.fon output.(woff|ttf|...)\n" %
                         sys.argv[0])
    else:
        # Rectangular pixels
        width_adjust = 388 * 4 / 3.0 / 644
        convert(fontGlyphs(open(sys.argv[1], 'rb').read()),
                sys.argv[2],
                par=width_adjust)
Example #19
def get_converted_audio(user_id, user_audio_path,
                        org_audio_path):  # run the functions below in one go
    editconfig.speaker_json(user_audio_path, org_audio_path)
    editconfig.train_json(user_audio_path)
    editconfig.test_json(org_audio_path)
    editconfig.synthesis_json(user_id, org_audio_path)
    preprocess.preprocess_dataset()
    hydra._internal.hydra.GlobalHydra().clear()
    convert.convert()
    hydra._internal.hydra.GlobalHydra().clear()
Example #20
 def test_convert(self):
     convert(INPUT_FILENAME, OUTPUT_FILENAME)

     with open(OUTPUT_FILENAME, 'r') as file1:
         with open(EXPECTED_OUTPUT_FILENAME, 'r') as file2:
             difference = set(file1).difference(file2)
     difference.discard('\n')

     self.assertFalse(difference)
Example #21
def file_process(conllUFile, verbose=False):
    try:
        options = "output"
        convert(conllUFile, options)
        if verbose:
            basefn = os.path.splitext(os.path.basename(conllUFile))[0]
            annfn = os.path.join(options, basefn + '.ann')
            click.echo('\nInfo: Writing into {0}.'.format(annfn))
        click.echo('\nInfo: Correctly processed {0}.'.format(conllUFile))
    except Exception:
        click.echo('\nError: failed to process {0}.'.format(conllUFile))
Example #22
    def test_h5ad_to_zarr(self, h5ad_file, tmpdir):
        p = tmpdir.join("filtered_gene_bc_matrices.zarr")
        input = h5ad_file
        output = str(p)
        convert(input, output)

        # read back and check a few things
        adata = read_zarr(output)
        assert adata.X.shape == (2700, 32738)
        assert adata.obs.shape == (2700, 0)
        assert adata.var.shape == (32738, 1)
Example #23
 def add(self, rdr, callback=None):
     if self.__readers:
         nrdr = convert.convert(rdr, (self.__format, ),
                                (self.__framerate, ))
     else:
         nrdr = convert.convert(
             rdr, (linear_8_mono_signed, linear_8_stereo_signed,
                   linear_16_mono, linear_16_stereo))
         self.__framerate = nrdr.getframerate()
         self.__format = nrdr.getformat()
     self.__readers.append((nrdr, callback))
     self.__mapreaders.append((rdr, nrdr))
Example #24
    def _prepare_minibatch(self, src, label, trg, batch_size, gpu_id):
        data = []
        for s, l, t in zip(src, label, trg):
            data.append(self._convert(s, t, l))

        if self.sort:
            data = sorted(data, key=lambda x: len(x[0]), reverse=True)
        batches = [convert.convert(data[b * batch_size:(b + 1) * batch_size], gpu_id)
                   for b in range(len(data) // batch_size)]
        if len(data) % batch_size != 0:
            batches.append(convert.convert(data[-(len(data) % batch_size):], gpu_id))

        return batches
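The index arithmetic above (full batches, then one remainder batch) can be checked in isolation; a runnable sketch:

data = [[1], [1, 2, 3], [1, 2], [1, 2, 3, 4], [1, 2, 3, 4, 5]]
batch_size = 2

data = sorted(data, key=len, reverse=True)
batches = [data[b * batch_size:(b + 1) * batch_size]
           for b in range(len(data) // batch_size)]
if len(data) % batch_size != 0:
    batches.append(data[-(len(data) % batch_size):])

assert sum(len(b) for b in batches) == len(data)
assert [len(b) for b in batches] == [2, 2, 1]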
Example #25
def queue_job(object):
    if async_installed:
        try:
            settings = Settings(object)
            async = getUtility(IAsyncService)
            async.queueJob(convert, object)
            settings.converting = True
        except:
            logger.exception("Error using plone.app.async with wc.pageturner. "
                             "Converting pdf without plone.app.async...")
            convert(object)
    else:
        convert(object)
Example #26
def test_convert():
    """test if convert() generates the desired wav from mp3"""
    filename = os.listdir("./music/mp3")[0]
    pathfile = "./music/mp3/" + filename
    # convert mp3 to wav
    c.convert(pathfile)

    found = False
    for file in os.listdir("./music/wav"):
        if file.endswith(".wav"):
            if file[:-4] == filename[:-4]:
                found = True

    assert found, "expected wav file was not generated"
Example #27
def firststep(conn):
    """ ingest a directory of music for database construction

    USAGE
    + this is the prerequisite for all analyses/identification
    + run this function to get a nicely-built music database
    + which will be used as the reference for audio matching
    
    WHAT IT DOES
    + sql database construction
    + read all mp3 files from a local dir
    + read metadata
    + convert mp3 to wav
    + compute spectrograms and fingerprints
    + record all info in the database

    GOOD FOR
    + lazy guys like me who don't want to construct db manually
    """

    # create tables if they don't exist
    d.create_table(conn)
    log.info("database created")
    # construct database
    for file in os.listdir("./music/mp3"):
        if file.endswith(".mp3"):
            pathfile = "./music/mp3/" + file
            # read metadata
            tup = c.meta(pathfile)
            d.add_song(tup, conn)
            # convert mp3 to wav
            c.convert(pathfile)
    log.info('all metadata recorded in the database')
    log.info('all audio converted to wav')

    for file in os.listdir("./music/wav"):
        if file.endswith(".wav"):
            pathfile = "./music/wav/" + file
            # compute spectrogram and fingerprints
            framerate, f, t, spect = a.spectrogram(pathfile)
            fingerprints1 = a.fingerprint(f, spect)
            fingerprints2 = a.fingerprint2(f, spect, framerate)
            song_id = d.select_songid(file, conn)
            log.info('audio file no. %s recorded in the database', song_id)
            # add fingerprints to database
            d.add_fingerprint(file, t, fingerprints1, fingerprints2, conn)
            # update fingerprinted status
            d.update_fingerprinted(song_id, conn)

    print('Done! Please check out your database ❤')
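A minimal sketch of the spectrogram step using scipy (a.spectrogram presumably wraps something similar; the signal and parameters here are assumptions):

import numpy as np
from scipy import signal

fs = 8000
t = np.arange(fs) / fs                  # one second of audio
x = np.sin(2 * np.pi * 440 * t)         # a pure A440 tone
f, times, spect = signal.spectrogram(x, fs)
print(f[spect.mean(axis=1).argmax()])   # peak bin lands near 440 Hz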
Example #28
def do_the_business():
    # open logfile
    with open(logfile_name,'w') as logfile:

        # some details
        broadcast(logfile,"File list contains %d files"%len(file_list))

        # delete the database
        if do_delete_db:
            os.remove(dbfile_name)

        # analysis stage
        if do_analyse:
            start = datetime.now()
            analyse.analyse(file_list=file_list,dbfile_name=dbfile_name,logfile=logfile,use_multiprocessing=use_multiprocessing,rel_judgment_dir=rel_judgment_dir)
            elapsed = datetime.now() - start
            broadcast(logfile,"Analyse phase took %s"%elapsed)

        # crossreference stage
        if do_crossreference:
            start = datetime.now()
            crossreference.crossreference(file_list=file_list,dbfile_name=dbfile_name,logfile=logfile,use_multiprocessing=use_multiprocessing)
            elapsed = datetime.now() - start
            broadcast(logfile,"Crossreference phase took %s"%elapsed)

        # convert stage
        if do_convert:
            conversion_start = time.time()
            start = datetime.now()
            convert.convert(file_list=file_list,dbfile_name=dbfile_name,logfile=logfile,public_html_dir=public_html_dir,use_multiprocessing=use_multiprocessing,do_legislation=do_legislation)
            elapsed = datetime.now() - start
            broadcast(logfile,"Convert phase took %s"%elapsed)
            if do_delete_html:
                delete_html.delete_html(conversion_start,output_dir)

        # disambiguation stage
        if do_disambiguation:
            disambiguation_start = time.time()
            start = datetime.now()
            disambiguation.disambiguation(file_list=file_list,dbfile_name=dbfile_name,logfile=logfile,output_dir=output_dir,use_multiprocessing=use_multiprocessing)
            elapsed = datetime.now() - start
            broadcast(logfile,"Disambiguation phase took %s"%elapsed)

        # index stage
        if do_index:
            start = datetime.now()
            indexes.make_indexes(dbfile_name=dbfile_name,logfile=logfile,output_dir=output_dir,use_multiprocessing=use_multiprocessing)
            elapsed = datetime.now() - start
            broadcast(logfile,"Index phase took %s"%elapsed)
Example #29
def _main():
    args = _parse_arguments()
    global _debug
    _debug = args.debug
    try:
        if args.command in ['capture', 'cap']:
            capture(args)
        elif args.command in ['clean']:
            clean(args)
        elif args.command in ['convert', 'con']:
            convert(args)
    except KeyboardInterrupt:
        # TODO: Maybe track some statistics and print them on exit.
        print()
        sys.exit(0)
Example #30
	def sendParam(self, name, value, type=MAVLINK_TYPE_FLOAT):
		print("Type: " + str(type))
		if type == MAVLINK_TYPE_INT32_T:
			print("Converting")
			value = convert(value)

		self.master.param_set_send(name, value, type)
Example #31
def serveGIF():
	try:
		gifname = convert.convert(request.form.getlist('url')[0])
	except IndexError:
		return render_template('gif.html', gif=None)
	else:
		return render_template('gif.html', gif=gifname)
Example #32
def test_attribute_and_text_list():
    original: Dict = {
        "outer_list": [{
            "my_element": {
                "_": "value_A",
                "@attribute1": "foo_A",
                "@attribute2": "bar_A"
            }
        }, {
            "my_element": {
                "_": "value_B",
                "@attribute1": "foo_B",
                "@attribute2": "bar_B"
            }
        }]
    }
    expected: Dict = {
        "outer_list": [{
            "my_element@attribute1": "foo_A",
            "my_element@attribute2": "bar_A",
            "my_element": "value_A"
        }, {
            "my_element@attribute1": "foo_B",
            "my_element@attribute2": "bar_B",
            "my_element": "value_B"
        }]
    }
    actual: Dict = convert(original)
    assert expected == actual
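A minimal flattener consistent with the expected output above; an illustrative reimplementation, not the library's actual convert():

from typing import Dict

def flatten_element(d: Dict) -> Dict:
    out = {}
    for name, value in d.items():
        if isinstance(value, dict) and "_" in value:
            # "_" holds the element text; "@..." keys become suffixed siblings
            for k, v in value.items():
                out[name if k == "_" else name + k] = v
        else:
            out[name] = value
    return out

row = {"my_element": {"_": "value_A", "@attribute1": "foo_A"}}
assert flatten_element(row) == {"my_element": "value_A",
                                "my_element@attribute1": "foo_A"}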
Example #33
  def insert(self, data, table_name = 'dumptruck', **kwargs):
    self.create_table(data, table_name)

    # Turn it into a list of zips.
    converted_data = convert(data)

    for row in converted_data:
      self.__check_and_add_columns(table_name, row)
    
    # .keys() and .items() are in the same order
    # http://www.python.org/dev/peps/pep-3106/
    for row in converted_data:
      keys = [pair[0] for pair in row]
      values = [pair[1] for pair in row]

      question_marks = ','.join('?'*len(keys))

      # This is vulnerable to injection.
      sql = u'INSERT OR REPLACE INTO %s (%s) VALUES (%s);' % (
        quote(table_name), ','.join(keys), question_marks)
      self.execute(sql, values, commit=False)

    self.__commit_if_necessary(kwargs)
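Values go through ? placeholders while the table and column names are interpolated, which is why the comment flags injection. A runnable sqlite3 sketch of that split:

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute('CREATE TABLE t (a, b)')

keys, values = ['a', 'b'], [1, 2]
question_marks = ','.join('?' * len(keys))
# identifiers are interpolated (must be trusted); values are parameterized
sql = 'INSERT OR REPLACE INTO t (%s) VALUES (%s);' % (','.join(keys), question_marks)
conn.execute(sql, values)

assert conn.execute('SELECT a, b FROM t').fetchone() == (1, 2)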
Example #34
def main(r, settings):

    for mention in r.inbox.mentions(limit=5): # or use .unread? # or use .stream ...

        if not mention.new:
            print("> already replied")
            break        

        print('{}>>>{}'.format(mention.author, mention.body))
        
        ##### instance checker
        
        submission_url = mention.submission.url
        
        reply_text = convert(submission_url) # remove emojis
        print(reply_text)
        
        #####

        try:
            if settings.prod:
                mention.reply(reply_text)
            else:
                print("########replied######")
            mention.mark_read()

        except Exception as e:
            print("> error while replying")
            print(e)
            continue

        print("> replied to {}".format(mention.author))

    print("...sleeping. Time: {}".format(current_time()))
    time.sleep(settings.sleep_time) 
Example #35
  def create_table(self, data, table_name, error_if_exists = False, **kwargs):
    'Create a table based on the data, but don\'t insert anything.'

    converted_data = convert(data)
    if len(converted_data) == 0 or converted_data[0] == []:
      raise ValueError(u'You passed no sample values, or all the values you passed were null.')
    else:
      startdata = OrderedDict(converted_data[0])

    # Select a non-null item
    for k, v in startdata.items():
      if v is not None:
        break
    else:
      v = None

    if_not_exists = u'' if error_if_exists else u'IF NOT EXISTS'

    # Do nothing if all items are null.
    if v is not None:
      # This is vulnerable to injection.
      sql = u'''
        CREATE TABLE %s %s (
          %s %s
        );''' % (if_not_exists, quote(table_name), quote(k), get_column_type(startdata[k]))
      self.execute(sql, commit = False)
      self.commit()

      for row in converted_data:
        self.__check_and_add_columns(table_name, row)
Example #36
def test_convert():
    data = load_fixture()[0]
    result = (
    'snippet audio.dispose(audioHandle) "Releases audio memory associated with the handle."\n' +
    'audio.dispose( ${1:audioHandle} )\n' + 
    'endsnippet')
    assert result == convert(data)
Example #37
def zip(asker, req, a, b):
    if (properties.check_firmly(asker, is_empty(), a) or 
            properties.check_firmly(asker, is_empty(), b)):
        return asker.reply(answer=empty())
    zipped_first = T.pair(fields.get(asker, first(), a), fields.get(asker, first(), b))
    zipped_tail = zip(fields.get(asker, tail(), a), fields.get(asker, tail(), b))
    return asker.ask_tail(convert.convert(cons(zipped_first, zipped_tail), req))
Example #38
 def POST(self):
     try:
         args = web.input(xlsfile={}) 
     except ValueError:
         return "File too large. Maximum file size is 50MB"
         
     if 'xlsfile' not in args:
         raise web.seeother('/')
     
     #Cleanup the file path  
     filepath = args.xlsfile.filename.replace('\\','/')
     filename = filepath.split('/')[-1]
     
     if not filename.lower().endswith('xls'):
         raise web.seeother('/?e=Only xls files are accepted')
     
     #Generate a unique folder to store the uploaded file in    
     ID = str(int(random.random()*1000000000000))
     os.mkdir(upload_dir+ID)  
     
     #Store the uploaded xls in binary mode
     fout = open(upload_dir+ID+'/'+filename,'wb')
     fout.write(args.xlsfile.file.read())
     fout.close()
     
     #Remove any expired uploads
     self.cleanup_files(upload_dir)
     
     #Convert the file. Report any errors, or offer the id of the
     #folder to download from
     try:
         download_ID = convert.convert(upload_dir +'/'+ID+'/'+filename)
     except:
         raise web.seeother('/?e='+ str(sys.exc_info()[1]))
     raise web.seeother('/?id=' + download_ID)
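Naming the upload folder with random.random() can collide; a sketch of a collision-resistant alternative with uuid (the directory layout here is illustrative):

import os
import tempfile
import uuid

upload_dir = tempfile.gettempdir()   # stand-in for the app's upload_dir
ID = uuid.uuid4().hex                # 32 hex chars, effectively unique
folder = os.path.join(upload_dir, ID)
os.mkdir(folder)
print(folder)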
Example #39
	def __oper_mask(self, text):
		type_mask_and = text.split('&')
		type_mask_or = text.split('^')

		if len(type_mask_and) > 1:
			oper = '&'
			mask = convert.convert(type_mask_and[1])
			rest = type_mask_and[0]
			return (oper,mask,rest)
		elif len(type_mask_or) > 1:
			oper = '^'
			mask = convert.convert(type_mask_or[1])
			rest = type_mask_or[0]
			return (oper,mask,rest)
		else:
			return ('',0L,text)
Example #40
def _test(text, upper=True):
    """
    >>> _test('background: #FFF; color: #00AACC', False)
    'background: #fff; color: #00aacc'
    >>> _test('background: #fff; color: #00aacc')
    'background: #FFF; color: #00AACC'
    """
    return convert(text, upper)
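The docstring doubles as a doctest (run with python -m doctest). An illustrative stand-in for convert() that would satisfy it; the real implementation lives elsewhere in the project:

import re

def convert(text, upper=True):
    recase = str.upper if upper else str.lower
    return re.sub(r'#[0-9a-fA-F]{3,6}', lambda m: recase(m.group()), text)

assert convert('background: #FFF; color: #00AACC', False) == \
    'background: #fff; color: #00aacc'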
Example #41
def latex_code_changed(widget):
	try:
		preview_input.set_text(convert.convert(widget.get_text()))
	except:
		msg = "Exception: "
		msg += str(sys.exc_info()[0])
		msg += ": "
		msg += str(sys.exc_info()[1])
		preview_input.set_text(msg)
Example #42
def convert_stuff(args,user="",hostmask="",extra={}):
    if args == "my ass": return "\x02Error: \x0fOverflow error, value exceeded graham's number or so."
    
    value = args.split(" ")[0].strip()
    unit1 = args.split(value)[1].split(" to ")[0].strip()
    unit2 = args.split(value)[1].split(" to ")[1].strip()
    
    try: return "\x02Conversion: \x0f" + str(convert.convert(value,unit1,unit2))
    except: return "\x02Error: \x0fInvalid units found, use listunit for units listing."
Example #43
def command_latex_code_changed(*args):
    try:
        unicode_code.set(convert.convert(latex_code.get()))
    except:
        msg = "Exception: "
        msg += str(sys.exc_info()[0])
        msg += ": "
        msg += str(sys.exc_info()[1])
        unicode_code.set(msg)
Example #44
 def stlToMesh(self):
     if self.fname is not None:
         if convert.convert(self.fname):
             self.meshFile = self.fname[:-3]+"mesh"
             print self.meshFile
         else:
             print "cannot convert to mesh"
     else:
         print "no file name has been selected"
    def run(self, edit, show_regions=True, show_panel=True):
        selected = True

        for region in self.view.sel():
            if not region.empty():
                s = self.view.substr(region)
                replaced = convert(s)
                self.view.replace(edit, region, replaced)
            else:
                selected = False

        if selected is False:
            view = self.view
            context = view.substr(sublime.Region(0, view.size()))
            replace_text = convert(context)

            view.replace(edit, sublime.Region(0, view.size()), replace_text)

        print('Converted: uppercase')
Example #46
def download_vid(vidname):
    c = convert(mydir+'/content/vid/'+vidname, request.form['format'])
    if c.status == 0:
        with open(c.path + c.filename, 'rb') as f:
            vid = f.read()
        resp = make_response(vid)
        resp.headers['Content-Disposition'] = 'attachment; filename="' + c.filename + '"'
        resp.headers['Content-Length'] = len(vid)
        resp.content_type = "application/octet-stream"
        return resp
    else:
        return "500: Server Error"
Example #47
def do_convert(path):
    url = BASE_URL + request.full_path
    logging.debug(url)

    f = urllib2.urlopen(url)
    ical = convert.convert(f.read())

    resp = make_response(ical)
    resp.headers['Content-type'] = 'text/calendar; charset=utf-8'
    resp.headers['Content-Disposition'] = 'inline; filename=calendar.ics'

    return resp
Example #48
def exportToTensorflow(request):
    if request.method == 'POST':
        net = yaml.safe_load(request.POST.get('net'))
        net_name = request.POST.get('net_name')
        if net_name == '':
            net_name = 'Net'

        # rename input layers to 'data'
        inputs = get_inputs(net)
        for i in inputs:
            net[i]['props']['name'] = 'data'

        prototxt, input_dim = jsonToPrototxt(net, net_name)
        randomId = datetime.now().strftime('%Y%m%d%H%M%S') + randomword(5)
        with open(BASE_DIR+'/media/'+randomId+'.prototxt', 'w') as f:
            f.write(prototxt)

        convert(BASE_DIR+'/media/'+randomId+'.prototxt', None, None, BASE_DIR+'/media/'+randomId+'.py', 'test')

        # NCHW to NHWC data format
        input_caffe = input_dim
        input_tensorflow = []
        for i in [0,2,3,1]:
            input_tensorflow.append(input_caffe[i])

        # converting generated caffe-tensorflow code to graphdef
        try:
            net = __import__(str(randomId))
            images = tf.placeholder(tf.float32, input_tensorflow)
            # the name of the first layer should be 'data' !
            net = getattr(net, net_name)({'data': images})
            graph_def = tf.get_default_graph().as_graph_def(add_shapes=True)
            with open(BASE_DIR+'/media/'+randomId+'.pbtxt', 'w') as f:
                f.write(str(graph_def))
        except AssertionError:
            return JsonResponse({'result': 'error', 'error': 'Cannot convert to GraphDef'})
        except AttributeError:
            return JsonResponse({'result': 'error', 'error': 'GraphDef not supported'})

        return JsonResponse({'result': 'success','id': randomId, 'name': randomId+'.pbtxt', 'url': '/media/'+randomId+'.pbtxt'})
Example #49
def submitted(project_path, template_room_name, map_path, object_inputs):

    # input sanitization
    if project_path == '':
        return loc('project_empty')
    if template_room_name == '':
        return loc('template_room_empty')
    if map_path == '':
        return loc('map_empty')

    if not os.path.exists(project_path):
        return loc('project_nonexistent')
    project_extension = project_path.split('.')[-1]
    if project_extension == 'gmx':
        template_room_path = os.path.join(os.path.split(project_path)[0], 'rooms', template_room_name+'.room.gmx')
    else:
        template_room_path = os.path.join(util.get_application_path(), 'temp_gmksplit', 'Rooms', template_room_name+'.xml')
    if not os.path.exists(template_room_path):
        return loc('template_room_nonexistent')
    if not os.path.exists(map_path):
        return loc('map_nonexistent')

    for objectname, enabled in object_inputs.values():
        if enabled:
            if objectname == '':
                return loc('enabled_object_no_name')
            if project_extension == 'gmx':
                object_path = os.path.join(os.path.split(project_path)[0], 'objects', objectname + '.object.gmx')
                if not os.path.exists(object_path):
                    return loc('object_nonexistent') % objectname

    # build dict of object names that were enabled
    chosen_object_names = {}
    for object_name, (gm_object_name, enabled) in object_inputs.items():
        if enabled:
            chosen_object_names[object_name] = gm_object_name

    conversion_output_filename = convert.convert(project_path, template_room_path, map_path, chosen_object_names)

    # save preferences
    with open('prefs', 'w') as f:
        f.write('project|%s\n' % project_path)
        f.write('template|%s\n' % template_room_name)
        for object_name, (gm_object_name, enabled) in sorted(object_inputs.items()):
            f.write('%s|%s|%i\n' % (object_name, gm_object_name, enabled))

    if project_extension == 'gmx':
        base = loc('convert_success_gmx')
    else:
        base = loc('convert_success_gmk')
    return base % conversion_output_filename
Example #50
def test_convert():
	"""Tests convert.convert()."""
	credit_file = "tests/data/credit_1.txt"
	credit_transactions = creditparser.parse_file(credit_file)

	debit_file = "tests/data/debit_1.txt"
	debit_transactions = debitparser.parse_file(debit_file)

	all_transactions = convert.convert([credit_file, debit_file])

	assert credit_file in all_transactions
	assert debit_file in all_transactions
	assert all_transactions[credit_file] == credit_transactions
	assert all_transactions[debit_file] == debit_transactions
Example #51
def run_parse_args(args):
    systems = list(units.by_system)
    p = ArgumentParser(description="Convert recipes from US to European measures and vice versa")
    p.add_argument('-f', '--from', dest='from_', default='us', choices=systems)
    p.add_argument('-t', '--to', default='eur', choices=systems)
    p.add_argument('FILE',
                   nargs='?',
                   type=FileType('rb'),
                   default=sys.stdin)
    # TODO: Add output file
    x = p.parse_args(args)
    text = x.FILE.read()
    
    # TODO: HACK: Assume utf-8
    text = text.decode('utf-8')
    return convert(text, to=x.to, from_=x.from_)
Example #52
def handle(event, context):
    job = retrieve(event, "job", "payload")
    # source: URL of zip archive of input USFM files
    source = retrieve(job, "source", "\"job\"")
    # stylesheets: (optional) list of CSS filenames
    stylesheets = [os.path.basename(DEFAULT_CSS)]
    if "stylesheets" in job:
        stylesheets.extend(job["stylesheets"])
    upload = retrieve(event, "upload", "payload")
    cdn_bucket = retrieve(upload, "cdn_bucket", "\"upload\"")
    cdn_file = retrieve(upload, "cdn_file", "\"upload\"")

    print('source: {}'.format(source))
    print('stylesheets: {}'.format(stylesheets))
    print('cdn_bucket: {}'.format(cdn_bucket))
    print('cdn_file: {}'.format(cdn_file))

    download_dir = tempfile.mkdtemp("download")
    scratch_dir = tempfile.mkdtemp("extracted")

    # download and unzip the archive (source)
    basename = os.path.basename(source)
    downloaded_file = os.path.join(download_dir, basename)
    download_file(source, downloaded_file)
    with zipfile.ZipFile(downloaded_file) as zf:
        zf.extractall(scratch_dir)

    inputs = [os.path.join(root, filename)
              for root, dirs, filenames in os.walk(scratch_dir)
              for filename in filenames]
    outputs = convert(inputs, output_dir=scratch_dir, stylesheets=stylesheets)

    zip_file = os.path.join(scratch_dir, 'result.zip')
    with zipfile.ZipFile(zip_file, "w") as zf:
        if len(outputs) > 0:
            zf.write(DEFAULT_CSS, os.path.basename(DEFAULT_CSS))
        for filename in outputs:
            zf.write(filename, os.path.basename(filename))

    print("Uploading {0} to {1}/{2}".format(zip_file, cdn_bucket, cdn_file))
    boto3.client('s3').upload_file(zip_file, cdn_bucket, cdn_file)

    return {
        'success': True,
    }
Example #53
  def create_table(self, data, table_name, error_if_exists = False, **kwargs):
    'Create a table based on the data, but don\'t insert anything.'
    converted_data = convert(data)
    startdata = dict(converted_data[0])

    # Select a non-null item
    for k, v in startdata.items():
      if v != None:
        break

    try:
      # This is vulnerable to injection.
      self.execute(u'''
        CREATE TABLE %s (
          %s %s
        );''' % (quote(table_name), quote(k), get_column_type(startdata[k])), commit = False)
    except sqlite3.OperationalError, msg:
      if (not re.match(r'^table.+already exists$', str(msg))) or (error_if_exists == True):
        raise
Example #54
def convert(req):
    tmpfile = req.form["file"]
    block_size = int(req.form.get("block_size", None) or 8)
    alpha_value = float(req.form.get("alpha_value", None) or 1.0)
    filter_limit = float(req.form.get("filter_limit", None) or (block_size / 5.0))
    shape = req.form.get("shape", None)
    
    leafname, ext = path.splitext(tmpfile.filename)
    
    newfile = converter.convert(tmpfile.file,
                                shape_name=shape,
                                block_size=block_size,
                                alpha_value=alpha_value,
                                filter_limit=filter_limit,
                                outfile="/var/www/images/%s.svg" % leafname)
    
    basename = path.basename(newfile)
    
    util.redirect(req, "/handler.py/display?img=%s" % basename)
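The "value or default" pattern above treats any falsy form value as missing, so an empty field falls back to the default but the string "0" does not; a two-line check:

print(int("" or 8))    # empty field -> 8 (falls back to the default)
print(int("0" or 8))   # "0" is a truthy string -> 0 (no fallback)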