Ejemplo n.º 1
0
def do_sync(src, dst, overwrite=False, move=False, quiet=False):
    """Copy (or move) every entry of directory ``src`` into directory ``dst``.

    If overwrite is True, then existing files and directories in the
    destination directory will be replaced if there is a file or
    directory in the source directory with the same name.

    If move is True, then files will be moved instead of copied. This
    is a useful optimization if the source directory will be deleted
    afterward anyway.

    If quiet is True, per-file progress messages are suppressed.
    """
    filenames = os.listdir(src)
    # Fails early on (or deletes, when overwrite=True) name collisions.
    ensure_nonexistent(dst, filenames, delete=overwrite)
    for f in filenames:
        srcfile = os.path.join(src, f)
        dstfile = os.path.join(dst, f)
        if move:
            if not quiet:
                # print() with a single pre-formatted argument is valid in
                # both Python 2 and 3; the original print statement is
                # Python-2-only syntax.
                print("Move %s to %s" % (f, dst))
            move_file(srcfile, dstfile)
        elif os.path.isdir(srcfile):
            if not quiet:
                print("Copy dir %s to %s" % (f, dst))
            copy_tree(srcfile, dstfile, symlinks=True)
        else:
            if not quiet:
                print("Copy %s to %s" % (f, dst))
            copy_file(srcfile, dstfile)
Ejemplo n.º 2
0
def install(install_asset_info, install_path):
    """Install built assets into ``install_path`` under hash-stamped names.

    Returns a dict mapping each asset's logical path to the physical
    (hash-stamped) filename written into ``install_path``.  Files already
    present in ``install_path`` that were not re-installed this run are
    removed afterwards as stale.
    """
    old_install_files = listdir(install_path)
    mapping = {}

    for asset_info in install_asset_info:
        if not asset_info.install:
            continue
        try:
            file_hash = get_file_hash(asset_info.build_path)
            logical_path = asset_info.logical_path
            # <basename-without-ext>_<hash>.<original extension chain>
            physical_path = '%s_%s.%s' % (splitext(basename(logical_path))[0],
                                          file_hash,
                                          asset_info.build_path.split('.', 1)[1])

            copy_file(asset_info.build_path, path_join(install_path, physical_path))
            mapping[logical_path] = physical_path

            try:
                # Still-wanted file: keep it out of the stale list below.
                old_install_files.remove(physical_path)
            except ValueError:
                pass

        except (IOError, TypeError):
            error('could not install %s' % asset_info.path)

    # Whatever is left was installed by a previous run and is now stale.
    for path in old_install_files:
        asset_install_path = path_join(install_path, path)
        # print() call form works on both Python 2 and 3; the original
        # print statement is Python-2-only syntax.
        print('Removing old install file ' + asset_install_path)
        remove_file(asset_install_path)

    return mapping
 def create_db(self):
     """Create this streamer's database from the base template if missing."""
     if self.db_exists():
         print('Database for {} already exists'.format(self.streamer_name))
         return
     print('Database for {} does not exist. Creating DB now.'.format(self.streamer_name))
     template_db = os.path.join(os.getcwd(), 'data', self.game, 'streamers', 'base', 'test_streamer.db')
     copy_file(src=template_db, dst=self.path)
Ejemplo n.º 4
0
def build_source_config(organism_shortname):
    """
    Writes a bioflow file based on the string organism argument

    :param organism_shortname: string falling into one of the supported
        categories (must be a member of ``ref_orgs_shortnames``)
    :raises Exception: if the organism shortname is not supported
    :return: None
    """
    if organism_shortname not in ref_orgs_shortnames:
        # Interpolate with %: the original passed the format string and the
        # value tuple as two separate Exception arguments, so the message
        # placeholders were never filled in.
        raise Exception('Unsupported organism, %s not in %s. %s' %
                        (organism_shortname, ref_orgs_shortnames,
                         'Please modify the sources.ini manually'))

    else:
        write_path = join(configs_rootdir, 'sources.ini')
        copy_file(ref_orgs_locations[organism_shortname], write_path)
        log.info('Active organism set to %s' % organism_shortname)
Ejemplo n.º 5
0
    def install_favicon(self, favicon_path):
        """Installs favicon into the root and at /misc/favicon.ico

        Arguments:
        favicon_path - Path to favicon.
        """
        favicon_filename = basename(favicon_path)
        destinations = (
            path_join(self._path, favicon_filename),
            path_join(self._path, 'misc', favicon_filename),
        )
        for destination in destinations:
            # Replace any stale copy before installing the new one.
            if isfile(destination):
                os.remove(destination)
            copy_file(favicon_path, destination)
Ejemplo n.º 6
0
def download_book_cover_image(book_json,
                              filename='_cover.jpg',
                              size='640',
                              type='1_1',
                              alt_file='cover.jpg'):
    """ Downloads the cover image specified in "book_json".

  book_json -- dictionary object with book metadata; must provide
      book_json["images"]["url_template"] containing %type% and %size%
      placeholders.
  filename -- filename of the output files
  size -- the width of the image in pixels. The "sizes" options (generally) are: 130, 250, 470, 640, 1080, and 1400.
  type -- the aspect ratio of for the cover image. The "types" options (generally) are: "1_1", "2-2_1", and "3_4".
      (NOTE: this parameter name shadows the builtin ``type`` inside this
      function.)
  alt_file -- an identical file to the expected image, but with a different name.

  The default "image_url" (used by the HTML output) is type: "3_4", size: 640.

  Returns the path of the cover image file, whether freshly downloaded,
  copied from ``alt_file``, or already present on disk.
  """

    # default cover image:
    # cover_img_url = book_json["image_url"]

    # variable size/resolution: (default is 640*640 px)
    cover_img_url_tmplt = book_json["images"]["url_template"]
    cover_img_url = cover_img_url_tmplt.replace('%type%',
                                                type).replace('%size%', size)

    filepath = get_book_pretty_filepath(book_json)
    cover_img_file = os.path.join(filepath, filename)
    cover_img_alt_file = os.path.join(filepath, alt_file)
    if not os.path.exists(filepath):
        os.makedirs(filepath)
    if not os.path.exists(cover_img_file):
        # check if we have the "alternative" image file avaible
        if not os.path.exists(cover_img_alt_file):
            # download the image
            # NOTE(review): "(unknown)" in these log messages looks like a
            # redacted placeholder (probably meant to name the output file)
            # — confirm the intended text before changing.
            log.info(f'Downloading "{cover_img_url}" as "(unknown)"')
            # NOTE(review): no timeout or status-code check on this request.
            download_request = requests.get(cover_img_url)
            with open(cover_img_file, 'wb') as outfile:
                outfile.write(download_request.content)
        else:
            # copy the image file
            log.debug(f'Copying {alt_file} as (unknown)')
            copy_file(cover_img_alt_file, cover_img_file)
    else:
        log.debug(f'(unknown) already exists, skipping...')
    return cover_img_file
Ejemplo n.º 7
0
def copy_static_files(template_static=True,
                      content_static=True,
                      out_path=None):
    """Copy static files to the output directory.

    template_static -- copy the template's static directory tree.
    content_static -- copy the path(s) under config["content_static_path"]
        (a single string or a list of strings; each may be a dir or a file).
    out_path -- destination root; defaults to config["out_path"].
    """
    if out_path is None:  # `is None`, not `== None` (PEP 8)
        out_path = config["out_path"]

    if template_static:
        template_static_src = config["template_static_path"]

        if os.path.isdir(template_static_src):
            template_static_dst = os.path.join(
                out_path, os.path.basename(template_static_src))
            copy_tree(template_static_src, template_static_dst)
        else:
            # Fixed stray backtick typo in the original warning message.
            logger.warning(
                "Template static path '%s' doesn't exist; skipping." %
                template_static_src)

    if content_static:
        if "content_static_path" in config:
            # Accept either a single path string or a list of paths.
            if isinstance(config["content_static_path"], str):
                content_static_srcs = [config["content_static_path"]]
            else:
                content_static_srcs = config["content_static_path"]

            for content_static_src in content_static_srcs:
                if os.path.isdir(content_static_src):
                    content_static_dst = os.path.join(
                        out_path, os.path.basename(content_static_src))
                    copy_tree(content_static_src, content_static_dst)
                elif os.path.isfile(content_static_src):
                    content_static_dst = os.path.join(
                        out_path, os.path.dirname(content_static_src))
                    logger.debug(
                        "Copying single content_static_path file '%s'." %
                        content_static_src)
                    copy_file(content_static_src, content_static_dst)
                else:
                    logger.warning(
                        "Content static path '%s' doesn't exist; skipping." %
                        content_static_src)
        else:
            logger.debug("No content_static_path in conf; skipping copy")
Ejemplo n.º 8
0
def validate_directories(args, hparams):
    """Validate and arrange directory related arguments.

    args -- parsed CLI namespace providing logdir, logdir_root and
        restore_from attributes.
    hparams -- hyper-parameter object, saved for new runs and loaded when
        resuming from an existing logdir.
    Returns a dict with keys 'logdir', 'logdir_root' and 'restore_from'.
    Raises ValueError for mutually exclusive argument combinations.
    """

    # Validation
    if args.logdir and args.logdir_root:
        raise ValueError(
            "--logdir and --logdir_root cannot be specified at the same time.")

    if args.logdir and args.restore_from:
        raise ValueError(
            "--logdir and --restore_from cannot be specified at the same "
            "time. This is to keep your previous model from unexpected "
            "overwrites.\n"
            "Use --logdir_root to specify the root of the directory which "
            "will be automatically created with current date and time, or use "
            "only --logdir to just continue the training from the last "
            "checkpoint.")

    # Arrangement
    logdir_root = args.logdir_root
    if logdir_root is None:
        # Fall back to the module-level default root.
        logdir_root = LOGDIR_ROOT_Wavenet

    logdir = args.logdir
    if logdir is None:
        # New run: create a timestamped logdir and snapshot the hparams
        # (both the serialized values and the hparams.py source file).
        logdir = get_default_logdir(logdir_root)
        print('Using default logdir: {}'.format(logdir))
        save_hparams(logdir, hparams)
        copy_file("hparams.py", os.path.join(logdir, "hparams.py"))
    else:
        # Resuming: load the hparams saved alongside the checkpoint.
        load_hparams(hparams, logdir)

    restore_from = args.restore_from
    if restore_from is None:
        # args.logdir and args.restore_from are exclusive,
        # so it is guaranteed the logdir here is newly created.
        restore_from = logdir

    return {
        'logdir': logdir,
        # NOTE(review): returns the raw args.logdir_root (possibly None),
        # not the resolved logdir_root computed above — confirm intentional.
        'logdir_root': args.logdir_root,
        'restore_from': restore_from
    }
Ejemplo n.º 9
0
def main(args: Sequence[str] = None) -> None:
    """Parse CLI arguments, apply the selected transformation, write the result.

    Exits with status 2 (after printing the traceback) on any failure.
    """
    nfo("Starting")
    try:
        args = parse_args(sys.argv[1:] if args is None else args)
        setup_logging(args.verbose)
        dbg(f"Invoked with args: {args}")
        transformation = get_transformation(args.transformation)
        document = parse_file(args)
        if args.inplace:
            # Keep a pristine copy next to the input before overwriting it.
            copy_file(args.input, args.input.with_suffix(".orig"))
            dbg("Saved document backup with suffix '.orig'")
        dbg("Applying transformation.")
        document.root = transformation(document.root)
        write_result(document, args)
    except Exception:
        print_exc()
        raise SystemExit(2)
Ejemplo n.º 10
0
def setup_access_file(*args):
	"""Create a timestamped Access DB copy and populate it with utilization data.

	Requires both the CS and DH utilization file selections to be set;
	otherwise an info dialog is shown.  If writing fails, the partial copy
	is deleted, the error log window is opened and the exception re-raised.
	"""
	if utl_file_dh.get() and utl_file_cs.get():
		filename1 = './e2Boston_RsrPlus-empty.mdb'
		time = datetime.now().strftime("%Y%m%d_%H%M_%S")
		filename2 = "./utilization_for_E2Boston" + time + ".mdb"
		copy_file(filename1, filename2)

		global log  # reset so a previous run's entries don't leak into this error report
		log = []

		try:
			write_access_file(filename2)
		except BaseException:  # explicit spelling of the bare `except:`; still re-raised below
			delete_file(filename2)
			open_error_log_window()
			raise

	else:
		messagebox.showinfo(message='Choose both CS utilization file and DH utilization file.')
Ejemplo n.º 11
0
    def load_manifest(self, current_version):
        """Check for newer versions and apply any pending update manifests.

        Backs up the data file and the running executable first, then walks
        the downloaded version list forward from ``current_version``,
        applying each newer version's manifest.  Any failure is written to
        UPDATE_ERRORS.txt and the updater window is closed.
        """
        try:
            copy_file("data.pls", "save/data.pls")
            move_file("PlayListStore4.exe", "save/PlayListStore4.exe")

            # Version list is '='-separated; the last entry is the newest.
            versions = download(VersionsPath).text.split('=')
            if versions[-1] != current_version:
                if current_version == versions[-2]:
                    # Exactly one version behind: apply only the newest manifest.
                    manifest = download(ManifestPath +
                                        "%s.txt" % versions[-1]).text
                    self.update(manifest)
                else:
                    # Several versions behind: apply every manifest after
                    # current_version, in order.
                    # NOTE(review): this branch passes the raw download
                    # result to self.update(), while the branch above passes
                    # `.text` — confirm which form update() expects.
                    for version in versions[versions.index(current_version) +
                                            1:]:
                        manifest = download(ManifestPath + "%s.txt" % version)
                        self.update(manifest)
        except Exception as e:
            # Best-effort error reporting: the exception is swallowed after
            # being written to disk, then the updater shuts down.
            print(e)
            open("UPDATE_ERRORS.txt", 'w').write(str(e))
            self.close()
Ejemplo n.º 12
0
def extract(item,
            atom_indices='all',
            structure_indices='all',
            output_filename=None):
    """Extract a CRD file (currently only whole-file copies are supported).

    item -- path of the source .crd file.
    atom_indices, structure_indices -- only 'all' is implemented; any other
        selection raises NotImplementedError.
    output_filename -- destination path; a temporary .crd filename is
        generated when omitted.
    Returns the path of the extracted file.
    """
    if output_filename is None:
        # Import before use: the original imported temp_filename only after
        # already calling it, relying on a module-level import being present.
        from molsysmt._private.files_and_directories import temp_filename
        output_filename = temp_filename(extension='crd')

    # Compare with ==, not `is`: identity of string literals is an
    # implementation detail (SyntaxWarning on CPython >= 3.8).
    if (atom_indices == 'all') and (structure_indices == 'all'):
        from shutil import copy as copy_file
        copy_file(item, output_filename)
        tmp_item = output_filename
    else:
        raise NotImplementedError()

    return tmp_item
Ejemplo n.º 13
0
def setup_access_file(*args):
    """Create a timestamped Access DB copy and fill it with utilization data.

    Requires both the CS and DH utilization file selections to be set;
    otherwise an info dialog is shown.  If writing fails, the partial copy
    is deleted, the error log window is opened and the exception re-raised.
    """
    if utl_file_dh.get() and utl_file_cs.get():
        filename1 = './e2Boston_RsrPlus-empty.mdb'
        time = datetime.now().strftime("%Y%m%d_%H%M_%S")
        filename2 = "./utilization_for_E2Boston" + time + ".mdb"
        copy_file(filename1, filename2)

        global log  # reset so a previous run's entries don't leak into this error report
        log = []

        try:
            write_access_file(filename2)
        except BaseException:  # explicit spelling of the bare `except:`; still re-raised below
            delete_file(filename2)
            open_error_log_window()
            raise

    else:
        messagebox.showinfo(
            message='Choose both CS utilization file and DH utilization file.')
Ejemplo n.º 14
0
def dl(url, local_file, cache=True, headers=None):
    """Download ``url`` to ``local_file``, with an on-disk cache keyed by URL.

    When ``cache`` is True and a cached copy exists whose time matches the
    server's Last-Modified time, the cached file is copied instead of
    re-downloading.  Fresh downloads update the cache and, when the server
    provided Last-Modified, the local file's timestamps.
    """
    logger = logging.getLogger('httpext')
    last_modified = get_last_modified_time(url, headers=headers)
    filetime = None

    if last_modified:
        filetime = calendar.timegm(last_modified)

    cache_dir = path_join(os.environ['HOME'], '.cache', 'httpext')
    cut = len(dirname(url)) + 1
    # Cache filename: zero-padded prefix length + scheme-stripped URL.
    # Raw string for the regex: '\:' in a plain string is an invalid escape
    # sequence (DeprecationWarning/SyntaxWarning on modern CPython).
    # NOTE(review): '/' is replaced by the literal character '2' — looks
    # odd (possibly meant '_'); preserved as-is since cache filenames on
    # disk depend on it.
    cached_fn = '%05d%s' % (cut, re.sub(r'^https?://', '', url).replace('/', '2'))
    cached_file = path_join(cache_dir, cached_fn)

    if cache:
        logger.debug('cache = True')

        if not isdir(cache_dir):
            logger.debug('Creating cache directory %s' % (cache_dir))
            os.makedirs(cache_dir)

        if fs.isfile(cached_file) and \
                fs.has_same_time(cached_file, filetime):
            logger.debug('Has cached file %s and file has same time' % (cached_file))
            # NOTE(review): joining dirname(local_file) with local_file
            # duplicates the directory for relative paths — confirm intent.
            copy_file(cached_file, path_join(dirname(local_file), local_file))

            return

    if headers:
        url = Request(url, None, headers)

    req = urlopen(url)

    # Write, then let the `with` close the file (the original called
    # f.close() redundantly inside the with-block) before touching
    # timestamps and the cache.
    with open(local_file, 'wb') as f:
        f.write(req.read())

    if filetime:
        setfiletime(local_file, (filetime, filetime))

    if cache:
        copy_file(local_file, cached_file)
Ejemplo n.º 15
0
def run_leak_check(program, options, file):
    """Run ``program`` under valgrind for each option and check for leaks.

    The input ``file`` is copied to a temporary name first so the original
    is never touched; the temporary copy is removed afterwards.
    """
    ext = os.path.splitext(file)[1]
    temp_file = f"{tc.valgrind_temp_basename}{ext}"
    if os.path.isfile(file):
        copy_file(file, temp_file)
    else:
        # Not a regular file: pass the argument through untouched.
        temp_file = file
    for option in options:
        if option != "":
            option = f" -{option}"
        if system() == 'Linux':
            command = f"valgrind ./{program}{option} {temp_file}"
        elif system() == 'Darwin':
            valgrind_args = "valgrind  --leak-check=full --show-leak-kinds=all"
            command = f"{valgrind_args} ./{program}{option} {temp_file}"
        else:
            # The original left `command` unbound here, crashing with a
            # NameError on other platforms; fail with a clear message.
            raise NotImplementedError(f"Unsupported platform: {system()}")
        valgrind_output = run(command, stdout=PIPE, stderr=PIPE, shell=True)
        valgrind_output = valgrind_output.stderr.decode('utf-8')
        print_cur_task(f"Checking for leaks '{tc.some_color}"
                       f"{program}{option} {temp_file}{tc.color_clear}'")
        check_valgrind_output(valgrind_output, program, file)
    if os.path.isfile(temp_file):
        os.remove(temp_file)
Ejemplo n.º 16
0
def prepare_dirs(config, hparams):
    """Resolve the model directory for this run and sync hyper-parameters.

    New runs get a timestamped model dir under config.log_dir (created on
    demand) plus a saved hparams snapshot; runs resumed via config.load_path
    reuse that directory and load its stored hparams instead.
    """
    if hasattr(config, "data_paths"):
        config.datasets = [os.path.basename(data_path) for data_path in config.data_paths]
        dataset_desc = "+".join(config.datasets)

    if config.load_path:
        config.model_dir = config.load_path
    else:
        # NOTE(review): if config has no data_paths attribute, dataset_desc
        # (and config.datasets further below) are unbound here — NameError.
        # Confirm data_paths is guaranteed whenever load_path is unset.
        config.model_name = "{}_{}".format(dataset_desc, get_time())
        config.model_dir = os.path.join(config.log_dir, config.model_name)

        for path in [config.log_dir, config.model_dir]:
            if not os.path.exists(path):
                os.makedirs(path)

    if config.load_path:
        load_hparams(hparams, config.model_dir)
    else:
        # Record speaker count and snapshot hparams (serialized values plus
        # the hparams.py source file itself).
        setattr(hparams, "num_speakers", len(config.datasets))

        save_hparams(config.model_dir, hparams)
        copy_file("hparams.py", os.path.join(config.model_dir, "hparams.py"))
Ejemplo n.º 17
0
def _main():
    """Entry point: rename (or copy) files with unicode spaces replaced."""
    parser = argparse.ArgumentParser(description="replace_spaces")
    parser.add_argument("file_path", nargs="+", help="File(s) to handle.")
    parser.add_argument("--char",
                        type=str,
                        default=".",
                        help="Character used to replace spaces. (default '.')")
    parser.add_argument("-c",
                        action="store_true",
                        help="If given copies the file(s) with the new name.")
    parser.add_argument("-n",
                        action="store_true",
                        help="Dry run, only prints the new name.")
    args = parser.parse_args()
    replacement = args.char
    copy, dry = args.c, args.n
    if dry:
        print("Dry mode, would {} modified files like".format(
            "copy" if copy else "rename"))

    for original in args.file_path:
        directory = os.path.dirname(original)
        new_name = os.path.basename(original)

        # Substitute every known unicode space variant in the basename.
        for space in _UNICODE_SPACES:
            new_name = new_name.replace(space, replacement)

        renamed = os.path.join(directory, new_name)

        if dry:
            print("- {}".format(renamed))
        elif copy:
            copy_file(original, renamed)
        else:
            os.rename(original, renamed)
Ejemplo n.º 18
0
    def load_rom(self, clean_name, new_name):
        """Opens the files associated with the clean rom and the modded rom

        Copies clean_name to new_name, verifies the copy's size, then opens
        both files read/write (kept open as self.clean_rom / self.new_rom)
        and mods the copy unless its checksum says it is already modded.

        clean_name -- path to the unmodified rom.
        new_name -- path at which the working copy is created.
        """

        # Known md5 of the pure rom; the modded checksum is a placeholder.
        pure_rom_sum = '21f3e98df4780ee1c667b84e57d88675'
        modded_rom_sum = 'idk'
        pure_rom_size = 3145728  # 3 MiB: size of an unheadered rom

        # First, make a copy of clean_name as new_name
        copy_file(clean_name, new_name)

        checksum = _checksum(new_name)
        filesize = _file_length(new_name)
        # TODO: Check and decapitate...
        # But what to do with the clean one? If we are going to
        # copy data from the clean one, we need it to be unheadered,
        # but we also want to keep it unchanged.
        # NOTE(review): assert disappears under `python -O`; consider an
        # explicit exception for headered roms instead.
        assert filesize == pure_rom_size, "Rom is headered!"
        # if filesize != pure_rom_size:
        # print("This is a headered rom, lets cut it off")
        # self.decapitate_rom(new_name)
        # checksum = _checksum(new_name)

        # Both file objects deliberately stay open for later patching.
        self.clean_rom = open(clean_name, "r+b")

        # Mod the rom if necessary
        if checksum == pure_rom_sum:
            #print("Looks like a valid pure rom, we'll mod it first")
            self.new_rom = open(new_name, "r+b")
            self.mod_rom()
        # Load it if it already has the right mods
        # TODO: do we want this?
        elif checksum == modded_rom_sum:
            #print("This is already modded, we can just load it")
            self.new_rom = open(new_name, "r+b")
        else:  #TODO: restrict once we know what the checksums are supposed to be.
            #print("Something is wrong with this rom")
            self.new_rom = open(new_name, "r+b")
            self.mod_rom()
Ejemplo n.º 19
0
def extract(item, atom_indices='all', structure_indices='all', copy_if_all=True, check=True,
            output_filename=None):
    """Extract from a prmtop file (currently only whole-file extraction).

    item -- path of the source prmtop file.
    atom_indices, structure_indices -- only 'all' is implemented; any other
        selection raises NotImplementedError.
    copy_if_all -- when True, return a copy of the file; otherwise return
        ``item`` itself.
    check -- when True, digest/validate the arguments first.
    output_filename -- destination for the copy; a temporary filename is
        generated when omitted (new, backward-compatible parameter).
    Returns the path of the extracted file (or ``item`` when not copying).
    """
    if check:

        digest_item(item, 'file:prmtop')
        atom_indices = digest_atom_indices(atom_indices)
        structure_indices = digest_structure_indices(structure_indices)

    # Compare with ==, not `is`: identity of string literals is an
    # implementation detail (SyntaxWarning on CPython >= 3.8).
    if (atom_indices == 'all') and (structure_indices == 'all'):

        if copy_if_all:

            from shutil import copy as copy_file
            if output_filename is None:
                # The original referenced an undefined `output_filename`
                # here (NameError on this path); generate a temp name like
                # the sibling crd extractor does.
                from molsysmt._private.files_and_directories import temp_filename
                output_filename = temp_filename(extension='prmtop')
            copy_file(item, output_filename)
            tmp_item = output_filename

        else:
            tmp_item = item
    else:

        raise NotImplementedError()

    return tmp_item
Ejemplo n.º 20
0
def fix_names(target):
    """
    Checks that the name of the file_item matches the exif data
    contained in the file_item

    :param target:
    :return:
    """
    media = u.find_all_files(
        target, ('.jpg', '.mp4', '.mov', '.jpeg', '.png'))
    print('Scanning {} photos'.format(len(media)))
    for index, source in enumerate(media):
        dest_dir = os.path.join(
            target, u.generate_foldername_from_meta(source))
        dest_name = u.generate_filename_from_meta(source)
        u.create_directory(dest_dir)
        LOGGER.info('Processing file %s', dest_name)

        # Avoid clobbering: suffix duplicates with a timestamp + index.
        if os.path.exists(os.path.join(dest_dir, dest_name)):
            dest_name = '{}_DUP_{}{}'.format(
                dest_name, u.get_time_stamp(), index)
        # Copy then delete the original (i.e. a move).
        copy_file(source, os.path.join(dest_dir, dest_name))
        os.remove(source)
Ejemplo n.º 21
0
 def _compile(self):
     """Recompile NEURON mod files when the collected asset state changed.

     Clears the neuron mod path, copies in the mod files to compile, runs
     the platform-specific build, then records the new directory hashes.
     """
     assets, mod_files, cache_data = self._collect_asset_state()
     if len(mod_files) == 0:
         # Nothing to compile since the cache was written; skip the build.
         return
     # Walk over all files in the neuron mod path and remove them then copy over all
     # `mod_files` that have to be compiled. Afterwards run a platform specific
     # compile command.
     neuron_mod_path = self.get_neuron_mod_path()
     _remove_tree(neuron_mod_path)
     # Copy over fresh mods
     for file in mod_files:
         copy_file(file, neuron_mod_path)
     # Platform specific compile
     if sys.platform == "win32":
         self._compile_windows(neuron_mod_path)
     elif sys.platform in ("linux", "darwin"):
         self._compile_linux(neuron_mod_path)
     else:
         raise NotImplementedError(
             "Only linux and win32 are supported. You are using " + sys.platform
         )
     # Update the cache with the new mod directory hashes.
     self.update_cache(cache_data)
Ejemplo n.º 22
0
def copy_source(src, dest, overwrite: bool = False):
    """
    Copy content from src to dest folder.

    Both paths are resolved through ``calculate_path`` first.  Directories
    are copied recursively (refusing to clobber an existing destination
    unless ``overwrite`` removes it first); single files get their
    destination's parent directory created on demand.
    """
    src = calculate_path(src)
    dest = calculate_path(dest)
    print(f'copy_source: {src} -> {dest}')
    if path.isdir(src):
        if overwrite:
            uncopy_source(dest)
        if path.exists(dest):
            raise Exception('Could not copy. Please try again.')
        p = copytree(src, dest)
        print(p)
    elif path.isfile(src):
        # Bug fix: ensure the *destination* parent directory exists.  The
        # original tested dirname(src), which always exists for an existing
        # source file, so copies into missing destination dirs failed.
        dest_dir = path.dirname(dest)
        if dest_dir and not path.exists(dest_dir):
            makedirs(dest_dir)
        p = copy_file(src, dest)
        print(p)
 def copy(self, src, dst):
     """Copy every queued video file from ``src`` into directory ``dst``.

     Raises InvalidPathException when ``dst`` does not exist.
     """
     if not os.path.exists(dst):
         raise InvalidPathException(dst + ' is not valid path')
     for name in self.video_files_to_copy:
         copy_file(os.path.join(src, name), dst)
Ejemplo n.º 24
0
                lyr_command = build_lyr_command(filename, "-p")
                subprocess.call(lyr_command, shell=True)
                message("o", filename)
                rel_filename = get_relpath(os.path.join(src,filename))
                hashes[rel_filename] = sha256(os.path.join(src,filename))

            if args == '':
                src = tmp_src

            # update hashes
            with open(os.path.join(conf_dir,"hashes.json"), 'w') as js:
                json.dump(hashes,js,indent=4)

        elif cmd == 'new':
            name = os.path.join(src,md(args).replace(' ','_'))
            p = copy_file(os.path.join(conf_dir,'template.md'),name)
            txt = name[:-2] + "txt"
            if os.path.isfile(txt) and os.path.getsize(txt) == 0:
                os.remove(txt)
                message("o", txt + ' removed')
            message('o', p + ' created')
            subprocess.call('{} {}'.format(editor,name), shell=True)

        elif cmd in ['ed', 'edit', 'vi', 'vim', 'code']:
            subprocess.call('{} {}'.format(editor,os.path.join(src,md(args))), shell=True)

        elif cmd == "status":
            for x in get_changed_files():
                if x[1] == 'new':
                    message('n', x[0])
                elif x[1] == 'changed':
Ejemplo n.º 25
0
# Arachnoid - Interpreter
Ejemplo n.º 26
0
def update_css():
    """Refresh the site's copy of hjb_v1.css from the reST doc root."""
    source = abspath(join_path(rst_doc_root, "hjb_v1.css"))
    destination = abspath(join_path(site_root, "hjb_v1.css"))
    copy_file(source, destination)
Ejemplo n.º 27
0
# Ensure the miney Python package is installed in the bundled interpreter.
if not path.isdir(join(PYTHON, "Lib", "site-packages", "miney")):
    logger.info("Installing miney with pip")
    run([join(PYTHON, "python.exe"), "-m", "pip", "install", "miney"], cwd=PYTHON)
else:
    logger.info("Found miney in python packages")

# First-run launcher install: executable plus its support files/resources.
if not path.isfile(join(MINEY, "miney_launcher.exe")):
    logger.info("Installing launcher")

    if not path.isdir(join(MINEY, "Miney")):
        os.mkdir(join(MINEY, "Miney"))

    if not path.isdir(join(MINEY, "Miney", "examples")):
        os.mkdir(join(MINEY, "Miney", "examples"))

    copy_file(join(REPOS, "launcher", "win32", "launcher.exe"), join(MINEY, "miney_launcher.exe"))
    copy_file(join(REPOS, "launcher", "launcher.py"), join(MINEY, "Miney", "launcher.py"))
    copy_file(join(REPOS, "launcher", "quickstart.py"), join(MINEY, "Miney", "quickstart.py"))
    copy_file(join(REPOS, "launcher", "LICENSE"), join(MINEY, "Miney", "LICENSE"))
    copy_tree(join(REPOS, "launcher", "res"), join(MINEY, "Miney", "res"))
else:
    logger.info("Found launcher")

# Create the default Minetest world (damage on, creative off).
if not path.isdir(join(MINEY, "Minetest", "worlds", "Miney")):
    logger.info("Create miney default world")
    # NOTE(review): this inner isdir check repeats the outer condition and
    # can never be False at this point.
    if not path.isdir(join(MINEY, "Minetest", "worlds", "Miney")):
        os.mkdir(join(MINEY, "Minetest", "worlds", "Miney"))

    with open(join(MINEY, "Minetest", "worlds", "Miney", "world.mt"), "w") as f:
        f.write("enable_damage = true\n")
        f.write("creative_mode = false\n")
Ejemplo n.º 28
0
def update_css():
    """Copy hjb_v1.css from the reST documentation root into the site root."""
    css_src = abspath(join_path(rst_doc_root, "hjb_v1.css"))
    css_dst = abspath(join_path(site_root, "hjb_v1.css"))
    copy_file(css_src, css_dst)
Ejemplo n.º 29
0
 def safe_copy(source, target):
     """Copy ``source`` to ``target``, creating the target's parent directory(ies) first if needed."""
     parent = dirname(target)
     if not path_exists(parent):
         makedirs(parent)
     copy_file(source, target)
Ejemplo n.º 30
0
def main():
    """Entry point for the Twitch charity bot.

    Initialises chat bots from config, announces online streamers, then
    loops forever: scrapes each stream's donation page, records new
    donations to the database and a text file, and posts periodic prompt
    messages to the streamers' chat channels.
    """
    print('[!] Starting Twitch Charity Bot')
    print(
        '[!] More information can be found at: https://github.com/purrcat259/twitch-charity-bot'
    )
    print('[+] Opening database connection')
    database = Pysqlite(DATABASE_NAME, DATABASE_NAME + '.db')
    print('[+] Initialising bots')
    # Determine if any extra bots need to be initialised here and store their instances in a list
    active_bots = []
    # Initialise the default bot
    bot_details = bot_config.purrbots[0]
    purrbot = Pytwitch(name=bot_details['name'],
                       token=bot_details['token'],
                       verbose=True)
    active_bots.append(purrbot)
    # check if the streams will use any non-default bots
    active_streams = [
        stream for stream in charity.active_charity_streams if stream['active']
    ]
    for stream in active_streams:
        if not stream['bot_name'] == 'default':
            print('[+] Team {} require bot with ID: {}'.format(
                stream['team_name'], stream['bot_name']))
            for bot in bot_config.purrbots:
                if bot['name'] == stream['bot_name']:
                    print('[+] Bot found, initialising {}'.format(
                        stream['bot_name']))
                    # Assign the team name as an identifier for easy comparison later on
                    non_default_bot = Pytwitch(name=bot['name'],
                                               token=bot['token'],
                                               identifier=stream['team_name'],
                                               verbose=True)
                    active_bots.append(non_default_bot)
                    break
            else:
                # for/else: no configured bot matched the requested name
                print(
                    '[-] Bot could not be found, please check your config then try again'
                )
                input('[?] Please press any key to exit')
                exit()
        else:
            print('[+] Team {} will use the standard purrbot359'.format(
                stream['team_name']))
    print('[+] Charity bot will start for the following streams:')
    print('[+]\tTeam\t\tBot')
    for stream in active_streams:
        # BUG FIX: was `is 'default'` -- identity comparison against a string
        # literal depends on interning and is unreliable; use equality.
        if stream['bot_name'] == 'default':
            print('\t{}\t\t{}'.format(stream['team_name'],
                                      active_bots[0].name))
        else:
            print('\t{}\t\t{}'.format(
                stream['team_name'],
                get_non_default_bot(active_bots, stream['team_name']).name))
    stream_pause_tick = int(CHECK_TICK / len(active_streams))
    print(
        '[+] Global pause tick: {} seconds Bot pause tick: {} seconds'.format(
            CHECK_TICK, stream_pause_tick))
    continue_value = input('[?] Continue? y/n: ')
    if not continue_value.lower().startswith('y'):
        exit()
    update_timestamp = strftime('%d/%m/%Y %X')
    for stream in active_streams:
        # Print to console first
        print(
            '[!] Purrbot is online at {} for stream team: {}, streamers: {}, watching at {}. Test mode: {}'
            .format(update_timestamp, stream['team_name'],
                    stream['streamer_list'], stream['donation_url'],
                    TESTING_MODE))
        # get the default bot
        bot = active_bots[0]
        # override if you need another one
        # BUG FIX: was `is not 'default'` -- same string-identity pitfall.
        if stream['bot_name'] != 'default':
            # Get the bot needed, by getting the index on the name
            bot_names = [obj.return_identity() for obj in active_bots]
            bot_index_needed = bot_names.index(stream['team_name'])
            bot = active_bots[bot_index_needed]
        chat_string = '{} is now online for streamers: {}. Watching for donations at {}'.format(
            bot.name, stream['streamer_list'], stream['donation_url'])
        bot.post_in_streamer_channels(streamer_list=get_online_streamers(
            streamer_list=stream['streamer_list']),
                                      chat_string=chat_string,
                                      pause_time=2)
    # build extra active stream data from what we already have
    for stream in active_streams:
        try:
            donation_amount_data = charity.get_donation_amount(
                url=stream['donation_url'])
        except Exception as e:
            print('[-] Unable to scrape website: {}'.format(e))
            input('Press any key to exit')
            exit()
        stream['amount_raised'] = donation_amount_data[0]
        stream['amount_goal'] = donation_amount_data[1]
        stream['amount_percentage'] = donation_amount_data[2]
        stream['prompt_index'] = 0
        stream['cycle_count'] = 0
        # NOTE(review): true division yields a float; the `==` comparison
        # against the integer cycle counter below only matches when this is
        # a whole number -- confirm prompt_tick*60 is a multiple of CHECK_TICK.
        stream['cycles_for_prompt'] = (stream['prompt_tick'] * 60) / CHECK_TICK
    # Start the main loop
    while True:
        for stream in active_streams:
            stream_bot = get_bot(
                bot_list=active_bots,
                bot_id=stream['team_name'],
            )
            print('[+] {} is on cycle: {} for team: {}'.format(
                stream_bot.name, stream['cycle_count'], stream['team_name']))
            try:
                donation_amount_data = charity.get_donation_amount(
                    url=stream['donation_url'])
                new_amount_raised = donation_amount_data[0]
            except Exception as e:
                print('[-] Website scrape error: {}'.format(e))
                # Skip past this current stream cycle if the scrape does not work
                continue
            # When a new donation is present, this will be true
            if not new_amount_raised == stream['amount_raised']:
                # update the timestamp
                update_timestamp = strftime('%d/%m/%Y %X')
                # get a float value of the new donation
                new_donation = get_amount_difference(stream['amount_raised'],
                                                     new_amount_raised)
                # assign the new, higher value to the stream dictionary
                stream['amount_raised'] = new_amount_raised
                # check that the donation amount is not 0.0, encountered this in the past
                if not new_donation == 0.0:
                    # round up the donation, because of floating point values going .999999
                    new_donation = round(new_donation, 2)
                    print('[!] NEW DONATION DETECTED! {}{} at {}'.format(
                        stream['donation_currency'], new_donation,
                        update_timestamp))
                # insert the donation into the database
                insert_donation_into_db(db=database,
                                        db_table=DATABASE_TABLE,
                                        amount=new_amount_raised,
                                        verbose=True)
                # build the string to place in the text file in the form: amount goal percentage
                text_file_string = '{} {} {}'.format(donation_amount_data[0],
                                                     donation_amount_data[1],
                                                     donation_amount_data[2])
                # Write the donation data to the text file
                write_text_file(file_name=stream['team_name'],
                                file_lines=text_file_string,
                                verbose=True)
                # Copy the file to the API directory
                try:
                    source_file = stream['team_name'] + '.txt'
                    destination = '/home/digitalcat/apache-flask/assets/charity/'
                    copy_file(src=source_file, dst=destination)
                    print('File successfully copied')
                except Exception as e:
                    print('[-] Copy failed: {}'.format(e))
                # build the string to post to channels
                chat_string = 'NEW DONATION OF {}{}! {}{} out of {}{} raised! {}% of the goal has been reached. Visit {} to donate!'.format(
                    stream['donation_currency'], new_donation,
                    stream['donation_currency'], stream['amount_raised'],
                    stream['donation_currency'], donation_amount_data[1],
                    donation_amount_data[2], stream['donation_url'])
                # post the chat string to all streamers
                # NOTE(review): posts via the default bot (purrbot) even when a
                # team-specific bot (stream_bot) was resolved above -- confirm
                # this is intended.
                purrbot.post_in_streamer_channels(
                    streamer_list=get_online_streamers(
                        streamer_list=stream['streamer_list'], verbose=True),
                    chat_string=chat_string,
                    pause_time=2)
            else:
                # if a new donation has not been detected, then check if we have to post a prompt
                if stream['cycle_count'] == stream['cycles_for_prompt']:
                    # reset the cycle counter
                    stream['cycle_count'] = 0
                    prompt_string = ''
                    # do a round robin between the chat strings available, according to the prompt index of the stream
                    if stream[
                            'prompt_index'] == 0:  # money raised, schedule and donation link
                        prompt_string = '{}{} has been raised by team {} for Gameblast so far! You too can donate at: {}'.format(
                            stream['donation_currency'],
                            stream['amount_raised'], stream['team_name'],
                            stream['donation_url'])
                    elif stream['prompt_index'] == 1:
                        current_streamers = get_online_streamers(
                            streamer_list=stream['streamer_list'],
                            verbose=True)
                        # if only 1 streamer is online, it doesn't make sense to post a single twitch link
                        if len(current_streamers) == 1:
                            prompt_string = '{}{} has been raised by team {} for Gameblast so far! You too can donate at: {}'.format(
                                stream['donation_currency'],
                                stream['amount_raised'], stream['team_name'],
                                stream['donation_url'])
                        else:
                            prompt_string = 'Watch all the current team {} streamers here: {}'.format(
                                stream['team_name'],
                                return_kadgar_link(
                                    get_online_streamers(
                                        streamer_list=stream['streamer_list'],
                                        verbose=True)))
                    # NOTE(review): also uses the default bot, not stream_bot.
                    purrbot.post_in_streamer_channels(
                        streamer_list=get_online_streamers(
                            streamer_list=stream['streamer_list'],
                            verbose=True),
                        chat_string=prompt_string,
                        pause_time=2)
                    # iterate the prompt index, reset it if it reaches the limit (depends on amount of prompts)
                    stream['prompt_index'] += 1
                    if stream[
                            'prompt_index'] == 2:  # TODO: Set this value somewhere else rather than manual?
                        stream['prompt_index'] = 0
                else:
                    stream['cycle_count'] += 1  # iterate the counter
                    # print how much time to the next prompt
                    cycles_left = int(stream['cycles_for_prompt'] -
                                      stream['cycle_count'] + 1)
                    time_left = round((cycles_left / 60) * CHECK_TICK, 1)
                    print(
                        '[+] Team: {}\n[+] Last donation at: {}\n[+] Next prompt: {} minutes\n[+] Amount raised: {}{}, '
                        .format(stream['team_name'], update_timestamp,
                                time_left, stream['donation_currency'],
                                stream['amount_raised']))
            pause(initial_prompt='Holding for team {}'.format(
                stream['team_name']),
                  amount=stream_pause_tick)
Ejemplo n.º 31
0
 def run(src, dst, verbose=False, args=None):
     if verbose:
         print 'Copy ' + src + ' -> ' + dst
     copy_file(src, dst)
     return True
Ejemplo n.º 32
0
    # Preprocess the raw distance data into the optimizer's input format.
    data_processor = DataProcessor(DISTANCES)

    PROC_DISTANCES = data_processor.execute_pipeline()

    # Load the scheduling problem parameters (constraints, demands, ...).
    PROBLEM_DEFINITION = load_problem_definition()

    SCHED_OPT = ScheduleOptimizer(PROC_DISTANCES, PROBLEM_DEFINITION)

    # Solve the schedule; presumably SOLUTION_DICT maps trips/slots to
    # assignments -- confirm against ScheduleOptimizer's documentation.
    SOLUTION_DICT = SCHED_OPT.execute_optimizer_pipeline()

    SCHED_VIS = ScheduleVisualizer(PROC_DISTANCES, SOLUTION_DICT)
    # NOTE(review): raises FileExistsError if this run directory already
    # exists -- possibly intentional to avoid clobbering earlier reports.
    os.makedirs(f"02_reports/{CURRENT_RUN}")

    SCHED_VIS.execute_visualizer_pipeline(CURRENT_RUN)

    # Stamp a per-run copy of the notebook template into the report folder.
    copy_file(
        "03_notebook_templates/trip_report_template.ipynb",
        f"02_reports/{CURRENT_RUN}/trip_report_{CURRENT_RUN}.ipynb",
    )

    # Execute the notebook headlessly and render it to HTML in place.
    os.system(
        (
            "jupyter nbconvert --execute --no-input --no-prompt --to html "
            + f"02_reports/{CURRENT_RUN}/trip_report_{CURRENT_RUN}.ipynb"
        )
    )

    # The executed .ipynb is temporary; only the HTML report is kept.
    os.remove(f"02_reports/{CURRENT_RUN}/trip_report_{CURRENT_RUN}.ipynb")

Ejemplo n.º 33
0
 def safe_copy(source, target):
     """ Ensure that targets's parent directory(ies) exist"""
     if not path_exists(dirname(target)):
         makedirs(dirname(target))
     copy_file(source, target)        
Ejemplo n.º 34
0
    def assemble_image(self,
                       base,
                       driver_fish,
                       application_fish,
                       dell_recovery_package,
                       create_fn,
                       version, iso, platform, no_update, sender=None, conn=None):
        """Assemble pieces that would be used for building a BTO image.
           base: mount point of base image (or directory)
           fish: list of packages to fish
           dell_recovery_package: a dell-recovery package to inject
           create_fn: function to call for creation of ISO
           version: version for ISO creation purposes
           iso: iso file name to create
           platform: platform name to identify
           no_update: don't include newer dell-recovery automatically"""
        logging.debug("assemble_image: base %s, driver_fish %s, application_fish\
%s, recovery %s, create_fn %s, version %s, iso %s, platform %s, no_update %s" %
                    (base, driver_fish, application_fish, dell_recovery_package,
                    create_fn, version, iso, platform, no_update))

        self._reset_timeout()

        base_mnt = self.request_mount(base, "r", sender, conn)

        assembly_tmp = tempfile.mkdtemp()
        atexit.register(walk_cleanup, assembly_tmp)

        #copy the base iso/mnt point/etc
        white_pattern = re.compile('')
        w_size = white_tree("size", white_pattern, base_mnt)
        self.start_sizable_progress_thread(_('Adding in base image'),
                                           assembly_tmp,
                                           w_size)
        white_tree("copy", white_pattern, base_mnt, assembly_tmp)
        self.stop_progress_thread()

        #Add in driver FISH content
        if len(driver_fish) > 0:
            # record the base iso used
            self.xml_obj.set_base(os.path.basename(base))

            self._process_driver_fish(driver_fish, assembly_tmp)
            logging.debug("assemble_image: done inserting driver fish")

        #Add in application FISH content
        length = float(len(application_fish))
        if length > 0:
            dest = os.path.join(assembly_tmp, 'srv')
            os.makedirs(dest)
            for fishie in application_fish:
                with open(fishie, 'rb') as fish:
                    md5sum = md5(fish.read()).hexdigest()
                new_name = application_fish[fishie]
                self.xml_obj.append_fish('application', os.path.basename(fishie), md5sum, new_name)
                if fishie.endswith('.zip'):
                    new_name += '.zip'
                elif os.path.exists(fishie) and tarfile.is_tarfile(fishie):
                    new_name += '.tgz'
                shutil.copy_file(fishie, os.path.join(dest, new_name))

        #If dell-recovery needs to be injected into the image
        if dell_recovery_package:
            self.xml_obj.replace_node_contents('deb_archive', dell_recovery_package)
            dest = os.path.join(assembly_tmp, 'debs')
            if not os.path.isdir(dest):
                os.makedirs(dest)
            if 'dpkg-repack' in dell_recovery_package:
                logging.debug("Repacking dell-recovery using dpkg-repack")
                call = subprocess.Popen(['dpkg-repack', 'dell-recovery'],
                                        cwd=dest, universal_newlines=True)
                (out, err) = call.communicate()
            else:
                logging.debug("Adding manually included dell-recovery package, %s", dell_recovery_package)
                shutil.copy(dell_recovery_package, dest)

        function = getattr(Backend, create_fn)
        function(self, assembly_tmp, version, iso, platform, no_update)
Ejemplo n.º 35
0
def update_the_css_file():
    """Copy the master CSS file from the docs tree into the site tree."""
    copy_file(join_path(rst_doc_root, css_file),
              join_path(site_root, css_file))
Ejemplo n.º 36
0
def save_backup(m):
    """Write a timestamped ``.bak`` copy of ``m.path`` next to the original."""
    source = m.path
    backup = source + strftime('-%Y%m%d_%H%M%S.bak')
    copy_file(source, backup)
    print('created backup copy %s' % backup)
Ejemplo n.º 37
0
def create_config(args):
    """Materialise a config file in the current directory from the named template."""
    import os

    from shutil import copy2 as copy_file

    from .helpers import get_config_path

    template_path = get_config_path(args.template)
    copy_file(template_path, "./" + args.output)
Ejemplo n.º 38
0
 def case_one(self, source: list):
     for i in source:
         for j in self.paths:
             copy_file(i, j, follow_symlinks=True)
Ejemplo n.º 39
0
 def run(src, dst, verbose=False, args=None):
     if verbose:
         print "Copy " + src + " -> " + dst
     copy_file(src, dst)
     return True
Ejemplo n.º 40
0
 def add_package(self, path):
     self.all_directory.mkdir(exist_ok=True)
     copy_file(str(path), str(self.all_directory))
Ejemplo n.º 41
0
 def run(src, dst, verbose=False, args=None):
     if verbose:
         print 'Copy ' + src + ' -> ' + dst
     copy_file(src, dst)
     return True
Ejemplo n.º 42
0
 def copy_file(self, from_path, to_path):
     shutil.copy_file(from_path, to_path)
Ejemplo n.º 43
0
        # Find the original image
        ori_image_basename = os.path.basename(
            f.image_path).split('-facade-')[0] + '.jpg'
        ori_xml_basename = os.path.basename(
            f.image_path).split('-facade-')[0] + '.xml'

        # Search every configured root recursively for the original image.
        ori_image_path = None
        for rt in args.path:
            fns = glob(os.path.join(rt, '**', ori_image_basename),
                       recursive=True)
            if len(fns) > 0:
                ori_image_path = fns[0]
                break

        # Likewise for the original annotation XML.
        ori_xml_path = None
        for rt in args.path:
            fns = glob(os.path.join(rt, '**', ori_xml_basename),
                       recursive=True)
            if len(fns) > 0:
                ori_xml_path = fns[0]
                break

        merged_xml_path = os.path.join(args.root, args.folder,
                                       ori_xml_basename)
        if not os.path.isfile(merged_xml_path):
            # BUG FIX: shutil has no copy_file(); copyfile() is the real API.
            # NOTE(review): ori_xml_path may still be None if no root matched
            # -- confirm upstream guarantees the XML exists.
            shutil.copyfile(ori_xml_path, merged_xml_path)

        ori_f = FacadeSubImage(merged_xml_path,
                               root=args.root,
                               roots=args.path)
Ejemplo n.º 44
0
 def copy_file(self, source, target):
     """Copy *source* to *target*, logging the attempt.

     The body's ``copy_file(source, target)`` call resolves to the
     module-level name (presumably a shutil alias -- confirm), not to this
     method; the method name only shadows it inside the class namespace.
     On any failure the run is aborted via ``self.log.exit_nicely``.
     """
     try:
         self.log.write('copying {} to {}'.format(source, target))
         copy_file(source, target)
     except Exception as e:
         # Any error (missing source, permissions, disk full) ends the run.
         self.log.exit_nicely('fail while copying {}'.format(source), e)
Ejemplo n.º 45
0
def process_new_submissions(submissions_dir):
    """Scan per-student submission folders, extract archives and stage
    ungraded C files for grading.

    Accumulates two report strings (questionable student-id matches and
    unexpected files), prints a per-student status line, and zeroes the
    marks spreadsheet for exercises that were not submitted.
    """
    verifystudentids = str()
    verifyfiles = str()
    for student in os.listdir(submissions_dir):
        submitted = ['-'] * num_of_exercises
        problematic_files = list()
        if os.path.isdir(os.path.join(submissions_dir, student)):
            student_id = match_folder_name_to_num(student)
            # NOTE(review): if student_id is None, get_name(None) below must
            # tolerate it -- confirm against get_name's contract.
            if student_id is None or not student.startswith(
                    get_name(student_id)):
                verifystudentids += "Please verify whether the student {} (folder {}) has the student number {}.\n".format(
                    get_name(student_id), student, student_id)
            # BUG FIX: initialise so the `exercise_num == 'UNK'` test below
            # cannot raise NameError when the first walked file is not a .c.
            exercise_num = None
            for submission in os.listdir(os.path.join(submissions_dir,
                                                      student)):
                if submission.endswith('.zip') or submission.endswith(
                        '.tar.gz') or submission.endswith('.tar'):
                    where_to_unzip = os.path.join(submissions_dir, student,
                                                  'tmp')
                    archive = os.path.join(submissions_dir, student,
                                           submission)
                    if submission.endswith('.zip'):
                        arch_ref = zipfile.ZipFile(archive)
                    else:
                        arch_ref = tarfile.open(
                            archive,
                            'r:gz' if submission.endswith('.tar.gz') else 'r:')
                    arch_ref.extractall(where_to_unzip)
                    # BUG FIX: close the archive handle (was leaked).
                    arch_ref.close()
                    for root, _, files in os.walk(where_to_unzip,
                                                  topdown=False):
                        for f in files:
                            if f.endswith('.c') or f.endswith('.C'):
                                subm_file = os.path.join(root, f)
                                to_grade_file = os.path.join(
                                    '{}', '{}_'.format(student_id) + f)
                                try:
                                    # Raw string: '\d' in a plain literal is a
                                    # deprecated invalid escape.
                                    exercise_num = re.findall(r"(\d+)", f)[-1]
                                    submitted[int(exercise_num) - 1] = '+'
                                    if not_graded_yet(student_id,
                                                      int(exercise_num)):
                                        copy_file(
                                            subm_file,
                                            to_grade_file.format(
                                                exs_to_grade_dirs[
                                                    int(exercise_num) - 1]))
                                # BUG FIX: was a bare `except:`, which also
                                # swallowed SystemExit/KeyboardInterrupt.
                                except Exception:
                                    exercise_num = 'UNK'
                                    # print("Exercise {} (exercise #{}), has been found for student {} ({}), in the folder {}.".format(f, exercise_num, student_id, get_name(student_id), student))
                            elif f not in noproblem_files or exercise_num == 'UNK':
                                problematic_files.append(f)
            if problematic_files:
                verifyfiles += "File(s) {} have been found for student {} ({}), in the folder {}.\n".format(
                    ", ".join(problematic_files), student_id,
                    get_name(student_id), student)
            print_status(
                "Student {} ({}) submitted the following exercises: {} (the folder {})."
                .format(student_id, get_name(student_id), " ".join(submitted),
                        student))
            # Record a zero in the marks workbook for every missing exercise.
            for ex_idx, status in enumerate(submitted):
                if status == '-':
                    col = colnum_string(ex_idx + 2)
                    row = str(studentids.index(int(student_id)) + 2)
                    marks_wb["TP3"][col + row] = 0
                    marks_wb.save(marks)
    print_problem(verifystudentids)
    print_problem(verifyfiles)
Ejemplo n.º 46
0
def train_model(args, x_train, y_train, x_test, y_test, hyperparameters,
                log_dir):
    """Build, compile and train the character-level CNN classifier.

    Checkpoints the best model to ``log_dir/model.hdf5`` during training,
    copies it to ``args.save_dir/last.hdf5`` afterwards, and returns the
    trained Keras model.  Ctrl-C stops training gracefully via a SIGINT
    handler.
    """
    def signal_handler(signal, frame):
        model.stop_training = True

    signal.signal(signal.SIGINT, signal_handler)

    print('\n[*] Building Model')
    model = Sequential()

    random_uni = RandomNormal(mean=0.0, stddev=0.05, seed=None)

    # NOTE(review): defined but never used below -- kept for reference.
    def custom_sigmoid_activation(x):
        return 1.7159 * K.tanh(2 / 3 * x)

    # We initially follow the architecture given here : https://papers.nips.cc/paper/5782-character-level-convolutional-networks-for-text-classification.pdf
    model.add(
        Conv1D(hyperparameters['filters'],
               hyperparameters['kernel_size'][0],
               kernel_initializer=random_uni,
               input_shape=(hyperparameters['max_features'],
                            hyperparameters['vect_size'])))
    model.add(BatchNormalization())
    model.add(Activation('relu'))

    model.add(MaxPooling1D(pool_size=2, strides=None))

    model.add(
        Conv1D(hyperparameters['filters'],
               hyperparameters['kernel_size'][1],
               kernel_initializer=random_uni,
               strides=1))
    model.add(BatchNormalization())
    model.add(Activation('relu'))

    model.add(MaxPooling1D(pool_size=2, strides=None))

    model.add(
        Conv1D(hyperparameters['filters'],
               hyperparameters['kernel_size'][1],
               kernel_initializer=random_uni,
               strides=1))
    model.add(BatchNormalization())
    model.add(Activation('relu'))

    model.add(
        Conv1D(hyperparameters['filters'],
               hyperparameters['kernel_size'][1],
               kernel_initializer=random_uni,
               strides=1))
    model.add(BatchNormalization())
    model.add(Activation('relu'))

    model.add(
        Conv1D(hyperparameters['filters'],
               hyperparameters['kernel_size'][1],
               kernel_initializer=random_uni,
               strides=1))
    model.add(BatchNormalization())
    model.add(Activation('relu'))

    model.add(MaxPooling1D(pool_size=2, strides=None))

    model.add(Flatten())

    model.add(
        Dense(hyperparameters['hidden_dims'], kernel_initializer=random_uni))
    model.add(BatchNormalization())
    model.add(Activation('sigmoid'))
    model.add(Dropout(0.5))
    model.add(
        Dense(hyperparameters['hidden_dims'], kernel_initializer=random_uni))
    model.add(BatchNormalization())
    model.add(Activation('sigmoid'))
    model.add(Dropout(0.5))
    model.add(
        Dense(get_auth_number(),
              kernel_initializer=random_uni,
              activation='softmax'))

    sgd = SGD(lr=hyperparameters['initial_lr'], momentum=0.9)

    # (removed an unused `last_val_loss` local; BoldScheduler tracks its own.)

    class BoldScheduler(Callback):
        """Bold-driver LR schedule: grow LR while val_loss improves, shrink
        it when val_loss regresses beyond a small tolerance."""

        def __init__(self):
            self.last_val_loss = float("inf")

        # BUG FIX: `logs={}` was a mutable default argument; use None.
        def on_epoch_end(self, epoch, logs=None):
            logs = logs or {}

            curr_val_loss = logs.get('val_loss')
            lr = K.get_value(model.optimizer.lr)

            if (self.last_val_loss > curr_val_loss):
                K.set_value(model.optimizer.lr, lr * 1.1)
                print("[*] lr changed from {:.6f} to {:.6f}".format(
                    lr, K.get_value(model.optimizer.lr)))
            elif curr_val_loss - self.last_val_loss > 0.001:
                K.set_value(model.optimizer.lr, lr * 0.7)
                print("[*] lr changed from {:.6f} to {:.6f}".format(
                    lr, K.get_value(model.optimizer.lr)))

            self.last_val_loss = curr_val_loss
            return

    def scheduler(epoch):
        # Step-decay schedule driven by epoch_decay/decay_rate globals.
        if epoch % epoch_decay == 0 and epoch != 0:
            lr = K.get_value(model.optimizer.lr)
            K.set_value(model.optimizer.lr, lr * decay_rate)
            print("[*] lr changed to {}".format(lr * decay_rate))
        return K.get_value(model.optimizer.lr)

    checkpointer = ModelCheckpoint(filepath=log_dir + '/model.hdf5',
                                   verbose=1,
                                   save_best_only=True,
                                   monitor='val_categorical_accuracy')
    tensorVizualisation = TensorBoard(log_dir=log_dir,
                                      histogram_freq=0,
                                      write_graph=True,
                                      write_images=True,
                                      embeddings_freq=0,
                                      embeddings_layer_names=None,
                                      embeddings_metadata=None)
    # NOTE(review): earlyStop, lr_decay and bold_decay are constructed but
    # never passed to model.fit's callbacks -- confirm whether intentional.
    earlyStop = EarlyStopping(monitor='val_loss',
                              min_delta=0.001,
                              patience=10,
                              verbose=1,
                              mode='auto')
    lr_decay = LearningRateScheduler(scheduler)
    bold_decay = BoldScheduler()

    model.compile(loss='categorical_crossentropy',
                  optimizer=sgd,
                  metrics=['categorical_accuracy'])

    plot_model(model, to_file='../notebook/result/model.png')

    model.fit(x_train,
              y_train,
              batch_size=hyperparameters['batch_size'],
              epochs=hyperparameters['epochs'],
              verbose=1,
              validation_data=(x_test, y_test),
              shuffle=True,
              callbacks=[checkpointer, tensorVizualisation])

    # Persist the best checkpoint as the "last" model for downstream use.
    copy_file(log_dir + '/model.hdf5', args.save_dir + '/last.hdf5')

    return model