Code Example #1
def downloader(file_url):
    """ Download the numbered image series referenced by file_url into a
        local directory, then upload the files and return the result as JSON. """
    file_url = str(file_url)
    dir_name = urllib.parse.unquote(file_url.split('/')[5])
    zip_name = '.'.join([dir_name, 'zip'])
    # tar_name = '.'.join([dir_name, 'tar', 'gz'])

    try:
        if not os.path.exists(dir_name):
            os.mkdir(dir_name)

        file_num = extract_file_number(get_file_name(file_url))
        for i in range(int(file_num), -1, -1):
            web_url = file_url[0:(file_url.rindex('-') + 1)] + str(i) + 'a.jpg'

            if not check_file(web_url):
                web_url = file_url[0:(file_url.rindex('-') +
                                      1)] + str(i) + 'v.jpg'

            file_object = requests.get(web_url, allow_redirects=True)
            download_location = os.path.sep.join(
                [os.getcwd(), dir_name,
                 get_file_name(web_url)])

            with open(download_location, 'wb') as image:
                image.write(file_object.content)

        file_paths = get_all_file_paths(dir_name)
        result = upload_files(file_paths, dir_name)
        return jsonify(message=result)
    except Exception as exc:
        # Report the failure instead of silently returning None.
        return jsonify(message="Download failed: {}".format(exc))
Code Example #2
def transform_single_dimen_xml(src_path, *transform_dimens):
    # Only process existing .xml files.
    if re.match(r'.*\.xml$', src_path) is None:
        return

    if not os.path.isfile(src_path):
        return

    src_dimen = get_src_dimen(src_path)
    if src_dimen is None:
        return

    res_dir = get_res_dir(src_path)
    if res_dir is None:
        return

    file_name = get_file_name(src_path)
    if file_name is None:
        return

    for dst_dimen in transform_dimens:
        dst_dir = get_res_value_dir(res_dir, dst_dimen)
        if not os.path.exists(dst_dir):
            os.mkdir(dst_dir)
        dst_path = dst_dir + os.sep + file_name
        scale = float(get_dimen(src_dimen)) / float(get_dimen(dst_dimen))
        generate_single_dimen_xml(src_path, dst_path, scale)
Code Example #3
async def handle_message(evt: events.NewMessage.Event) -> None:
    if evt.is_private:
        if evt.message.text.startswith("הוספה") and evt.reply_to_msg_id:  # "הוספה" = "add"

            msg = evt.message.text.splitlines()
            name = msg[1].replace("שם:", "")  # "שם:" = "Name:"
            desc = msg[2].replace("תקציר:", "")  # "תקציר:" = "Synopsis:"
            trailer = msg[3].replace("טריילר:", "")  # "טריילר:" = "Trailer:"
            # str.replace returns a new string; assign the result back.
            trailer = trailer.replace("watch?v=", "embed/")
            print(evt.reply_to_msg_id)
            reply_message_id = await client(functions.messages.GetMessagesRequest(id=[evt.reply_to_msg_id]))
            
            reply_msg = reply_message_id.messages[0].message.replace("Link to download file: ", "").replace(".html", "")
            file_id = reply_msg.split("/")[3]
            file_name = reply_msg.split("/")[4]
            img_count = 1
            for f in os.listdir("static/img"):
                if os.path.isfile("static/img/" + f):
                    print("is file!!")
                    img_count += 1
            print(img_count)
            await client.download_media(evt.message, file=f"static/img/{img_count}.jpg")
            add_video_to_db(name, desc, trailer, file_name, file_id, img_count)
            await evt.reply("Thanks, video added.")


        elif not evt.file:
            await evt.reply(start_message)
            return
        else:
            url = public_url / str(pack_id(evt)) / get_file_name(evt)
            await evt.reply(f"Link to download file: {url}.html")
            log.info(f"Replied with link for {evt.id} to {evt.from_id} in {evt.chat_id}")
            log.debug(f"Link to {evt.id} in {evt.chat_id}: {url}")
Code Example #4
File: search.py  Project: JsAaron/remove_background
def find(img, path, target):
    img_small = resize_to_resolution(img, 1024)
    small_h, w = img_small.shape[:2]

    im = cv2.GaussianBlur(img_small, (1, 1), 0)
    gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)

    edges = cv2.Canny(gray, 20, 30)
    edges = cv2.dilate(edges, None)
    edges = cv2.erode(edges, None)

    (_, thresh) = cv2.threshold(edges, 127, 255, cv2.THRESH_BINARY)
    # OpenCV 3.x findContours returns (image, contours, hierarchy).
    cimg, cnts, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL,
                                     cv2.CHAIN_APPROX_SIMPLE)

    # cv2.imshow("1",cimg)
    # cv2.waitKey(0)

    valid = []
    for cnt in cnts:
        x, y, w, h = cv2.boundingRect(cnt)
        if h == small_h:
            valid.append(x)

    if len(valid) >= 2:
        #  print('more matching regions found', path)
        new_file_name = get_specified_dir(target, get_file_name(path))
        cv2.imwrite(new_file_name, img_small)
        return img_small
Code Example #5
    def upload(self, file_path):
        """ Uploads file to the drive """

        file_name = util.get_file_name(file_path)

        file_dupl = self.search_file(file_name)

        # file is already in the vault
        if file_dupl:

            util.ColorPrinter.print_warning(
                "This file is already in the vault.")
            msg = "Do you want to overwrite it? [Y, N]: "

            if not util.read_y_n(msg):
                print("Terminating...")
                return

            file_id = file_dupl[0]["id"]
            file = self.service.files().get(fileId=file_id).execute()

            del file['id']
            media_body = MediaFileUpload(file_path, resumable=True)

            updated_file = self.service.files().update(
                fileId=file_id, body=file, media_body=media_body).execute()

        else:
            file_metadata = {'name': file_name}
            media = MediaFileUpload(file_path)
            file = self.service.files().create(body=file_metadata,
                                               media_body=media,
                                               fields='id').execute()
Code Example #6
    def __init__(self, source_table_batch: SourceTableBatch):
        self.source_table_batch = source_table_batch
        self.source_table = source_table_batch.source_table
        self.source = source_table_batch.source_table.source
        self.file_name = get_file_name(self.source_table_batch)
        self.file_location = get_file_path(self.source_table_batch)
        self.stage_name = config.snowflake_stage_name[self.source.source]
Code Example #7
File: mac.py  Project: charlieyqin/fplutil
  def mac_install_cmake(self):
    """Check for and install cmake.

    Assumes that if cmake is already installed, then the user has correctly set
    their path variable such that the command "cmake --version" will work.

    Raises:
      FileDownloadError: If the cmake tar fails to download, or is incorrectly
          downloaded.
      ExtractionError: If the cmake tar cannot be properly extracted.
    """
    if find_executable("cmake"):
      logging.info("CMake already installed.")
      return
    cmake_version = util.get_file_name(
        CMAKE_VERSIONS.get(self.version)[0], False)
    location = util.check_dir(self.cmake_path, cmake_version, "bin/cmake")
    if location:
      self.cmake_path = location
      logging.info("CMake found at " + self.cmake_path)
      return

    logging.info("CMake not installed. Downloading now.")
    url, file_hash = CMAKE_VERSIONS.get(self.os_version, (None, None))
    url = urlparse.urljoin(CMAKE_DOWNLOAD_PREFIX, url)
    location = os.path.join(common.BASE_DIR, "cmake.tar.gz")
    location = util.download_file(url, location, "cmake", file_hash)
    if not location:
      raise common.FileDownloadError("https://cmake.org/download/", "Please "
                                     "rerun this script afterwards with the "
                                     "flag\n\t--cmake=/path/to/cmake")
    if not util.extract_tarfile(location, "r:gz", self.cmake_path, "cmake"):
      raise common.ExtractionError(location)
    logging.info("CMake successfully installed.")
Code Example #8
def test_get_file_name():
    s = '\n'
    s += 'abc         = ' + util.get_file_name('abc') + '\n'
    s += 'abc.ext     = ' + util.get_file_name('abc.ext') + '\n'
    s += 'abc.def.ext = ' + util.get_file_name('abc.def.ext') + '\n'
    s += 'aaa/abc     = ' + util.get_file_name('aaa/abc') + '\n'
    s += 'aaa/abc/    = ' + util.get_file_name('aaa/abc/') + '\n'
    s += 'aaa/abc.ext = ' + util.get_file_name('aaa/abc.ext') + '\n'
    s += 'aaa\\abc     = ' + util.get_file_name('aaa\\abc') + '\n'
    s += 'aaa\\abc.ext = ' + util.get_file_name('aaa\\abc.ext') + '\n'
    s += 'aaa\\abc.def\\xyz.ext = ' + util.get_file_name(
        'aaa\\abc.def\\xyz.ext') + '\n'
    return s
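The cases above exercise both '/' and '\' separators. A minimal sketch of a get_file_name helper consistent with those cases is given below; this is an illustrative assumption, not the project's actual util.get_file_name.

import re

def get_file_name(path):
    # Hypothetical sketch: return the last path component, splitting on both
    # '/' and '\\'. A trailing separator yields an empty name ('aaa/abc/' -> '').
    return re.split(r'[\\/]', path)[-1]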
Code Example #9
File: bing.py  Project: CatchZeng/bing_images
def get_image_entries(urls, dir):
    entries = []
    for i, url in enumerate(urls):
        name = get_file_name(url, i, "#tmp#")
        path = os.path.join(dir, name)
        entries.append((url, path))
    return entries
Code Example #10
File: batched.py  Project: tirinox/swap_space_time
def block_name(ids, temp_dir):
    # fixme: debug stuff!!
    temp_dir = 'example/t'
    # if os.path.isdir(temp_dir):
    #     shutil.rmtree(temp_dir)
    # os.makedirs(temp_dir, exist_ok=True)

    ident = '_'.join(map(str, ids))
    return util.get_file_name(ident, temp_dir, 'block', 'png')
Code Example #11
    def encrypt_file(self, path, key, iv, name_iv, f):
        """ Encrypts the file located in <path> with <key> and <iv>, and writes
            the result to the ENCRYPTION_POOL as <encrypted file name>.cipher """
        f_bytes = util.get_file_bytes(path)
        cipher = self.encrypt_bytes(f_bytes, key, iv)

        enc_name = self.encrypt_filename(util.get_file_name(path), key)

        f_name = enc_name + ".cipher"
        util.write_file_bytes(iv + cipher, backup_client.ENCRYPTION_POOL,
                              f_name, f)
        return os.path.join(backup_client.ENCRYPTION_POOL, f_name)
Code Example #12
async def handle_request(req: web.Request, head: bool = False) -> web.Response:
    if str(req.url).endswith("html"):
        return await handle_request1(req, head=False)

    file_name = req.match_info["name"]
    file_id = int(req.match_info["id"])
    print(f"file id: {file_id}, filename: {file_name}")
    video_url = str(public_url) + "/" + str(file_id) + "/" + file_name
    print("video_url:", video_url)

    video_name = file_name
    print(f"video_name: {video_name}")

    peer, msg_id = unpack_id(file_id)
    if not peer or not msg_id:
        return web.Response(status=404, text="404: Not Found, peer / msg_id not found")

    message = cast(Message, await client.get_messages(entity=peer, ids=msg_id))
    print(f"filename: {file_name}, getfilename: {get_file_name(message)}")
    if not message or not message.file or get_file_name(message) != file_name:
        return web.Response(status=404, text="404: Not Found, not message / message file")

    size = message.file.size
    offset = req.http_range.start or 0
    limit = req.http_range.stop or size

    if not head:
        ip = get_requester_ip(req)
        if not allow_request(ip):
            return web.Response(status=429)
        log.info(f"Serving file in {message.id} (chat {message.chat_id}) to {ip}")
        body = transfer.download(message.media, file_size=size, offset=offset, limit=limit)
    else:
        body = None

    return web.Response(status=206 if offset else 200,
                        body=body,
                        headers={
                            "Content-Type": message.file.mime_type,
                            "Content-Range": f"bytes {offset}-{size-1}/{size}",
                            "Content-Length": str(limit - offset),
                            "Content-Disposition": f'attachment; filename="{file_name}"',
                            "Accept-Ranges": "bytes",
                        })
Code Example #13
File: mac.py  Project: niu2x/gxm
    def mac_update_path(self):
        """Checks PATH variable and edits the bash profile accordingly.

    Check for the appropriate path for cmake and cwebp, and edit the bash
    profile to include it. Don't check for MacPorts or ImageMagick, as those
    are managed by their own installation scripts.
    """
        optbin_update = True
        optsbin_update = True
        cmake_path_update = True
        cwebp_path_update = True
        ant_path_update = True
        if find_executable("convert"):
            optbin_update = False
            optsbin_update = False
        if find_executable("cmake"):
            cmake_path_update = False
        if find_executable("cwebp"):
            cwebp_path_update = False
        if find_executable("ant"):
            ant_path_update = False
        if optbin_update or optsbin_update or cwebp_path_update or ant_path_update:
            with open(self.bash_profile, "a") as f:
                todays_date = (str(date.today().year) + "-" +
                               str(date.today().month) + "-" +
                               str(date.today().day))
                f.write("\n# The following block was inserted by fplutil/bin/"
                        "setup_all_prereqs.py on " + todays_date + "\n")
                if optbin_update:
                    f.write("export PATH=/opt/local/bin:$PATH\n")
                if optsbin_update:
                    f.write("export PATH=/opt/local/sbin:$PATH\n")
                if cmake_path_update:
                    cmake_version = util.get_file_name(
                        CMAKE_VERSIONS.get(self.version)[0], False)
                    cmake_bin = os.path.join(
                        self.cmake_path, os.path.join(cmake_version,
                                                      CMAKE_BIN))
                    f.write("export PATH=" + cmake_bin + ":$PATH\n")
                if cwebp_path_update:
                    cwebp_bin = os.path.join(
                        self.cwebp_path, os.path.join(CWEBP_VERSION, "bin"))
                    f.write("export PATH=" + cwebp_bin + ":$PATH\n")
                if ant_path_update:
                    ant_bin = os.path.join(self.ant_path,
                                           os.path.join(ANT_VERSION, "bin"))
                    f.write("export PATH=" + ant_bin + ":$PATH\n")
                f.write("\n")
                self.bash_profile_changed = True
Code Example #14
File: mac.py  Project: charlieyqin/fplutil
  def mac_update_path(self):
    """Checks PATH variable and edits the bash profile accordingly.

    Check for the appropriate path for cmake and cwebp, and edit the bash
    profile to include it. Don't check for MacPorts or ImageMagick, as those
    are managed by their own installation scripts.
    """
    optbin_update = True
    optsbin_update = True
    cmake_path_update = True
    cwebp_path_update = True
    ant_path_update = True
    if find_executable("convert"):
      optbin_update = False
      optsbin_update = False
    if find_executable("cmake"):
      cmake_path_update = False
    if find_executable("cwebp"):
      cwebp_path_update = False
    if find_executable("ant"):
      ant_path_update = False
    if optbin_update or optsbin_update or cwebp_path_update or ant_path_update:
      with open(self.bash_profile, "a") as f:
        todays_date = (str(date.today().year) + "-" + str(date.today().month)
                       + "-" + str(date.today().day))
        f.write("\n# The following block was inserted by fplutil/bin/"
                "setup_all_prereqs.py on " + todays_date + "\n")
        if optbin_update:
          f.write("export PATH=/opt/local/bin:$PATH\n")
        if optsbin_update:
          f.write("export PATH=/opt/local/sbin:$PATH\n")
        if cmake_path_update:
          cmake_version = util.get_file_name(
              CMAKE_VERSIONS.get(self.version)[0], False)
          cmake_bin = os.path.join(self.cmake_path,
                                   os.path.join(cmake_version, CMAKE_BIN))
          f.write("export PATH=" + cmake_bin + ":$PATH\n")
        if cwebp_path_update:
          cwebp_bin = os.path.join(self.cwebp_path,
                                   os.path.join(CWEBP_VERSION, "bin"))
          f.write("export PATH=" + cwebp_bin + ":$PATH\n")
        if ant_path_update:
          ant_bin = os.path.join(self.ant_path,
                                 os.path.join(ANT_VERSION, "bin"))
          f.write("export PATH=" + ant_bin + ":$PATH\n")
        f.write("\n")
        self.bash_profile_changed = True
Code Example #15
    def decrypt_file(self, path, key, res_path, force):
        """ Decrypts the file located in <path> and stores it as res_path/<decrypted file name>.
            File names are encrypted using the same <name_iv> stored in the <CREDENTIALS> folder """

        cipher = util.get_file_bytes(path)

        # encrypted file == 16B IV | xB encrypted file content
        iv = cipher[:backup_client.IV_SIZE]
        cipher = cipher[backup_client.IV_SIZE:]

        AES_cip = AES.new(key, AES.MODE_CBC, iv)
        plain = bytes(AES_cip.decrypt(cipher))
        plain = util.unpad(plain)

        res_name = self.decrypt_filename(
            util.remove_cipher_extension(util.get_file_name(path)), key)
        util.write_file_bytes(plain, res_path, res_name, force)
        return res_name
Code Example #16
File: dbx.py  Project: d5/daniel.gs
    def __init__(self, **entries):
        if entries.get("is_deleted", False):
            raise Exception("The path is deleted: " + entries["path"])

        # setup basic info
        self.path = entries["path"]
        self.is_dir = entries.get("is_dir", False)
        self.rev = entries.get("rev", "0")
        self.hash = entries.get("hash", "")  # hash value for directory
        self.bytes = entries.get("bytes", 0)
        self._modified = entries.get("modified", None)

        # self._files: internal container for metadata contents of sub-files
        self._files = dict()
        if self.is_dir:
            # setup sub-file dict
            for c in entries.get("contents", list()):
                if not c.get("is_deleted", False):
                    self._files[get_file_name(c["path"]).lower()] = c

        # self._dbx_files: internal cache for dbx_file objects of sub-files
        self._dbx_files = dict()
Code Example #17
File: mac.py  Project: niu2x/gxm
    def mac_install_cmake(self):
        """Check for and install cmake.

    Assumes that if cmake is already installed, then the user has correctly set
    their path variable such that the command "cmake --version" will work.

    Raises:
      FileDownloadError: If the cmake tar fails to download, or is incorrectly
          downloaded.
      ExtractionError: If the cmake tar cannot be properly extracted.
    """
        if find_executable("cmake"):
            logging.info("CMake already installed.")
            return
        cmake_version = util.get_file_name(
            CMAKE_VERSIONS.get(self.version)[0], False)
        location = util.check_dir(self.cmake_path, cmake_version, "bin/cmake")
        if location:
            self.cmake_path = location
            logging.info("CMake found at " + self.cmake_path)
            return

        logging.info("CMake not installed. Downloading now.")
        url, file_hash = CMAKE_VERSIONS.get(self.os_version, (None, None))
        url = urlparse.urljoin(CMAKE_DOWNLOAD_PREFIX, url)
        location = os.path.join(common.BASE_DIR, "cmake.tar.gz")
        location = util.download_file(url, location, "cmake", file_hash)
        if not location:
            raise common.FileDownloadError(
                "https://cmake.org/download/", "Please "
                "rerun this script afterwards with the "
                "flag\n\t--cmake=/path/to/cmake")
        if not util.extract_tarfile(location, "r:gz", self.cmake_path,
                                    "cmake"):
            raise common.ExtractionError(location)
        logging.info("CMake successfully installed.")
Code Example #18
File: dbx.py  Project: d5/daniel.gs
    def file_name(self):
        return get_file_name(self.path)
Code Example #19
File: dbx.py  Project: d5/daniel.gs
    def get_files(self, patterns="*", index=0, count=-1, sort_key=None, sort_reverse=False, excludes=[]):
        if not self.is_dir:
            raise Exception("The path is not a directory: " + self.path)

        if index < 0:
            index = 0
        if count < 0:
            count = 1

        if not isinstance(patterns, list):
            patterns = [patterns]

        if not isinstance(excludes, list):
            excludes = [excludes]

        # change all patterns to lower-case
        skip_pattern_test = False
        for i in xrange(len(patterns)):
            patterns[i] = patterns[i].lower()
            if patterns[i] == "*":
                skip_pattern_test = True
        for i in xrange(len(excludes)):
            excludes[i] = excludes[i].lower()

        # apply filter based on patterns
        files = []
        for f, c in self._files.items():
            # exclude dir entries
            if c.get("is_dir", False):
                continue

            # test against exclude patterns
            exc = False
            for e in excludes:
                if fnmatch(f, e):
                    exc = True
                    break
            if exc:
                continue

            # test against patterns
            if skip_pattern_test:
                files.append(c)
            else:
                for p in patterns:
                    if fnmatch(f, p):
                        files.append(c)
                        break

        # sort files
        if sort_key == "modified":
            # sorting by modified time
            files.sort(key=lambda x: datetime_from_dropbox(x[sort_key]), reverse=sort_reverse)
        elif sort_key:
            # sorting by other key
            files.sort(key=lambda x: x[sort_key].lower(), reverse=sort_reverse)

        # enumerate
        if len(files):
            # adjust 'index' and 'count'
            filtered_count = len(files)
            if index >= filtered_count:
                index = filtered_count - 1
            if index + count > filtered_count:
                count = filtered_count - index

            return [self.get_file(get_file_name(c["path"])) for c in files[index : index + count]]
        else:
            return []
Code Example #20
    def test_get_file_name(self):
        self.assertEqual(util.get_file_name("/home/Desktop/file.txt"),
                         "file.txt")
        self.assertEqual(util.get_file_name("./file.txt"), "file.txt")
Code Example #21
def callback(ch, method, properties, body):

    try:

        global db

        session = sessionmaker()
        session.configure(bind=db.engine)

        local_session = session()

        requestParams = json.loads(body.decode('utf-8'))

        url = str(requestParams["url"])
        source_page = str(requestParams["sourcePage"])

        print("in function consuming inbound...with file {}".format(url))

        if "imageDate" in requestParams:
            image_date = str(requestParams["imageDate"])
        else:
            image_date = ""

        original_file_name = get_file_name(url)

        filename, file_extension = os.path.splitext(original_file_name)

        hash_object = hashlib.md5(url.encode('utf-8'))
        hash_str = hash_object.hexdigest()

        target_file_name = hash_str + file_extension

        target_path_download = os.environ.get('FILE_OUTPUT_PATH_FROM_DOWNLOAD')

        target_file_full_path_download = target_path_download + target_file_name

        os.makedirs(os.path.dirname(target_file_full_path_download), exist_ok=True)

        # save image file to dedicated location

        r = requests.get(url, allow_redirects=True)
        with open(target_file_full_path_download, 'wb') as image_file:
            image_file.write(r.content)

        # create smaller image for display / classification
        size = 680, 680

        target_path_thumbnails = os.environ.get('FILE_OUTPUT_PATH_FOR_CLASSIFY')

        target_file_full_path_thumbnail = target_path_thumbnails + target_file_name

        im = Image.open(target_file_full_path_download)
        im.thumbnail(size)

        if file_extension[1:].upper() == "JPG" or file_extension[1:].upper() == "JPEG":
            im.save(target_file_full_path_thumbnail, "JPEG")
        elif file_extension[1:].upper() == "PNG":
            im.save(target_file_full_path_thumbnail, "PNG")
        else:
            raise ValueError("Unsupported type of image {}".format(file_extension[1:].upper()))


        # temporary solution for owner

        message = {
            "sessionId" : hash_str,
            "fileName" : target_file_name,
            "sessionOwner": get_entity_name(url),
            "origPath": url,
            "origEntity": get_entity_name(url),
            "sessionThumbnailPath": target_file_full_path_thumbnail,
            "targetModels" : ["all"],
            "flagCopyThumbs" : True,
            "sourcePage" : source_page,
            "imageDate" : image_date
        }

        inspiration_image = local_session.query(Inspiration_Image).filter_by(urlHash=hash_str).first()
        inspiration_image.classifyPath = target_file_full_path_thumbnail
        local_session.commit()
        local_session.close()

        # create new message in queue for classification
        channel.basic_publish(exchange='', routing_key='classify', body=json.dumps(message, ensure_ascii=False))

        # mark message as acknowledged on the inbound channel
        ch.basic_ack(delivery_tag=method.delivery_tag)
    except ValueError as err:
        print("Value error of the image, unknown type {}".format(err))
    except Exception as exc:
        print("Unknown error in processing the file: {}".format(exc))
Code Example #22
File: helpers.py  Project: tirinox/swap_space_time
def out_frame_name(index, tmp_path):
    return util.get_file_name(index, tmp_path, prefix='pf', ext='npy')
Code Example #23
File: Main.py  Project: danielso/ORNNs
# Algorithm Parameters
ORT_INIT=False # initialize all weights to be orthogonal?
PROJ_GRAD=False # Should we project the gradient onto the tangent space of the Stiefel manifold (orthogonal matrices)?
RETRACT=False # Should we do a retraction step?
THRESHOLD=0 # error threshold at which we do the retraction step
GAIN=1 # a multiplicative constant applied to all orthogonal matrices
RETRACT_SKIP=1 # How many batches to wait before we do a retraction
opt_mathods_set=['SGD','ADAM']
OPT_METHOD=opt_mathods_set[0]
algorithm={'ORT_INIT':ORT_INIT,'PROJ_GRAD':PROJ_GRAD,'RETRACT':RETRACT,'THRESHOLD':THRESHOLD,
'GAIN':GAIN,'RETRACT_SKIP':RETRACT_SKIP,'OPT_METHOD':OPT_METHOD}

params={'network':network,'training':training,'algorithm':algorithm}
DO_SAVE=True # should we save results?
save_file_name=get_file_name(params)
#%% Initialize network model

data, vocab, data_ranges = load_dataset(DATASET)

# define a list of parameters to orthogonalize (recurrent connectivities)
param2orthogonlize=[]      
# The number of features is number of different letters + 1 unknown letter
FEATURES_NUM=len(vocab)+1
# Construct network

# Input layer
l_in = lasagne.layers.InputLayer(
    (BATCH_SIZE, SEQUENCE_LENGTH-1, FEATURES_NUM)) # the input sequence is one element shorter because we throw away the last character (it is only predicted, in the output)
layers_to_concat = []
# All recurrent layer
Code Example #24
File: helpers.py  Project: tirinox/swap_space_time
def get_input_frame_name(index, tmp_dir):
    return util.get_file_name(index, tmp_dir, prefix='in', ext='jpg')
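Examples #10, #22 and #24 call util.get_file_name from tirinox/swap_space_time with an identifier, a directory, a prefix and an extension. A hypothetical sketch consistent with those call sites (not the project's actual implementation) could look like this:

import os

def get_file_name(ident, dir_path, prefix='', ext='dat'):
    # Hypothetical path builder inferred from the call sites above,
    # e.g. get_file_name(7, 'tmp', prefix='in', ext='jpg') -> 'tmp/in7.jpg'.
    return os.path.join(dir_path, '{}{}.{}'.format(prefix, ident, ext))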
Code Example #25
    LOG, RQ = Logger("CrawlerLog", is_debug=is_debug).getlog()
    LOG.info("""
                Script started
                Input file: {}
                Threads: {}
                Timeout: {}s
                Retries: {}
                ID: {}
            """.format(file_path, THREAD_NUMS, TIMEOUT, RETRIES, RQ))
    writer = open("../stat", "w", encoding='utf8')
    # file type: either "tang" or "wu"
    if file_type == "fulltext":
        file_from = "wu"

        dir_path, file_name = get_file_name(file_path)
        # input file
        tag = time.strftime("%Y%m%d%H%M", time.localtime(time.time()))
        uni_format(file_path, file_from=file_from, id=RQ, logger=LOG, writer=writer)
        # scheduler
        sch = Scheduler("./tmp/{}_referers.tsv".format(RQ), "utf8", "tsv")
        sch.scheduling()
        ret_file_path = "./Logs/{}_ret.txt".format(tag)
        LOG.info("Start merging files")
        concat(ret_file_path, "./tmp/{}_{}.tsv".format(RQ, file_from), "./datas/{}_ret.csv".format(file_name),
               LOG,
               is_filter_by_word, is_filter_by_input, is_filter_by_country, 
               writer=writer)
        writer.close()
    elif file_type == "urls":
        sch = Scheduler(file_path, "utf8", "tsv")
Code Example #26
    print("Load failed #: {}, Match failed #:{}".format(
        len(LoadFailed), len(MatchFailed)))
    # Use Eid, load_failed, match_failed, Templates, Index, Variables
    match_idx = 0
    load_idx = 0
    head_idx = 0
    template_idx = {}
    for t in Templates.keys():
        template_idx[t] = 0

    Eids = util.load_array(
        util.decoder(os.path.join(temp_path, "Eid.eid"),
                     level_msg[_EncoderLevel]))
    fw = open(
        os.path.join(output_path,
                     util.get_file_name(input_path) + ".col"), "wb")
    for eid in Eids:
        if (eid == -1):
            fw.write((LoadFailed[load_idx] + '\n').encode())
            #print(load_idx)
            load_idx += 1
            continue
        if (eid == 0):
            #try:
            fw.write((MatchFailed[match_idx] + '\n').encode())
            #except:
            #print("tot length: {}, match index: {}".format(len(MatchFailed), match_idx))
            match_idx += 1
            continue

        #Fill up
Code Example #27
from C_CurveFitter import spline_fit
from app import startup_manual_refining
from util import MesType, generate_universe, get_file_name

# ================= User Input ================= #
topology = 'inputs/pkA_run3.psf'
trajectory = 'inputs/pkA_run3.dcd'

residue_list = ['ALA', 'DGLU',
                'DLYS']  # list of amino acids to be coarse-grained
# residue_list = ['ALA']
# residue_list = ['DA', 'DT', 'DG', 'DC', 'PHOSPHATE', 'RIBOSE']

# ================= Execution ================= #
u = generate_universe(topology, trajectory)
sim_name = get_file_name(topology)
u_cg = coarse_grain(u, residue_list, simulation_name=sim_name, export=True)
measurement_dict = parametize(u_cg, export=True)
startup_manual_refining(measurement_dict, u_cg)

# for measurement_blueprint_name in measurement_dict:
#     measurement_blueprint = measurement_dict[measurement_blueprint_name]
#     aggregate_values = []
#     for measurement_name, measurement_data in measurement_blueprint.items():
#         aggregate_values += measurement_data['values']
#     measurement_type = MesType(len(measurement_blueprint_name))
#     spline_fit(aggregate_values, measurement_type, 0.01)

# with open('outputs/{sim_name}_parametized.txt', 'w+') as outfile:
#     for key, value in measurement_dict.items():
#         atoms = key.count('-') + 1