Example #1
def build(source, build_dir, root, initiator):
    logging.info("Sync script started by %s...", initiator)

    # TODO: use this approach to include standards that are not managed on GitHub
    #standards = OSFS(source).listdir(dirs_only=True)
        
    # check if initiator is present in repos.json
    if initiator in standards_id.keys():
        cleanup(source, build_dir, initiator)

        logging.info("Fetching repo %s..." % initiator)
        backend.fetch_repo(root, initiator, standards_id[initiator]['url'], build_dir)
        
        logging.info("Building folders...")
        backend.build_folders(source, build_dir, standards_id[initiator], root)
        
        logging.info("Creating overview page...")
        webpages.create_overview_page(standards, source, build_dir)
    else:
        print "%s is not listed in repos.json... aborting." % initiator
        logging.error("%s is not listed in repos.json... aborting" % initiator)
        exit()
        #TODO: check if repo needs to be removed from repos/

    print "Done!"
Example #2
def make_audio(page, title, id_, directory):
    rand = random_alpha_numeric_generator()
    audio_name = title.lower() + '_pg' + str(id_) + ".mp3"
    tempfile = directory + rand + ".txt"
    output_file = directory + audio_name
    audio_files = []

    for i, element in enumerate(page):
        with open(directory + rand + str(i), "w") as f:
            f.write(element.text)
        do_tts(directory + rand + str(i),
               directory + rand + str(i) + ".wav")
        audio_files.append(directory + rand + str(i))

    convert_wav_to_mp3(audio_files)
    audio_lengths = get_length(audio_files)

    with open(tempfile, "w") as f:
        for file_ in audio_files:
            f.write("file '{}.mp3'\n".format(file_))

    merge_audio_files(audio_files, output_file, tempfile)
    cleanup(directory, rand)

    return audio_lengths, audio_files
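The list file written above is in ffmpeg's concat-demuxer input format (one file '...' line per clip), which suggests merge_audio_files is a thin wrapper around ffmpeg. A minimal sketch under that assumption (the real helper is not shown here):

import subprocess

def merge_audio_files(audio_files, output_file, list_file):
    # Assumed implementation: concatenate the MP3s named in list_file
    # with ffmpeg's concat demuxer, copying streams instead of re-encoding.
    subprocess.call(["ffmpeg", "-y", "-f", "concat", "-safe", "0",
                     "-i", list_file, "-c", "copy", output_file])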
Example #3
    def run(self, iterations=1):
        cwd = os.getcwd()

        for i in range(iterations):
            for case_name, case in Test.cases.items():
                if case_name not in self._configs:
                    continue

                os.chdir(case.path)
                if i == 0 and case.cleanup:
                    cleanup(self._arch)

                self._run_impl(case_name, None)

                os.chdir(cwd)

                if self._version is None:
                    continue

                for version in case.versions:
                    if version == self._version or self._version == 'all':
                        os.chdir(case.path + '-' + version)
                        if i == 0 and case.cleanup:
                            cleanup(self._arch)

                        self._run_impl(case_name, version)

                        os.chdir(cwd)
Example #4
def build_eval(filename):
    data = []
    skipped = 0
    total = 0
    with open(filename, "r") as fin:
        for line in fin:
            total += 1
            word, definition = line.strip().split("\t")
            word = cleanup(word)
            definition = [
                cleanup(token)
                for token in tokenise_and_cleanup_sentence(definition)
            ]
            if word not in pre_bacov:
                skipped += 1
                continue
            word = pre_embs[pre_bacov[word]] / np.linalg.norm(
                pre_embs[pre_bacov[word]])
            definition = [
                input_vocab.bacov[token] for token in definition
                if token in input_vocab.bacov
            ]
            if len(definition) < 1:
                skipped += 1
                continue
            # the trailing singleton dimensions below indicate a batch size of 1
            word = np.reshape(np.array(word, dtype=np.float32),
                              (output_dim, 1))
            definition = np.reshape(np.array(definition, dtype=np.int32),
                                    (len(definition), 1))
            data.append((word, definition))
    print(filename + ": skipped " + str(skipped) + " examples out of " +
          str(total) + ".")
    return data
Example #5
def author(controller_path, cfg):
    ubitname = input("UBIT Name: ")

    link_costs = [
        ('1', '2', 3),
        ('1', '3', 1),
        ('1', '4', 7),
        ('2', '4', 2),
        ('4', '5', 2),
        ('4', '3', 1),
        ('5', '2', 6),
    ]

    cntrl_port = utils.random_port()
    remote_api.init_remote_assignment(cfg, cntrl_port)
    ROUTERS, ROUTER_ID_MAPPING = utils.gen_topology(link_costs, cntrl_port)

    for router in ROUTERS:
        success = False
        utils.run_cmd([controller_path, '-t', 'topology', '-a', str(router.id), '-o', 'response.pkt'])
        try:
            with open('response.pkt', 'rb') as f:
                header = struct.unpack('!4sBBH', f.read(8))
                controller_ip, cntrl_code, resp_code, payload_len = utils.parse_response_header(header)
                payload = f.read(payload_len).decode()
                os.system('hexdump -C response.pkt')
                if payload == 'I, ' + ubitname + ', have read and understood the course academic integrity policy.':
                    success = True
        except Exception:
            success = False

    remote_api.cleanup(cfg)
    utils.cleanup()
    print(success)
Example #6
    def is_opened(self, file_name, day):
        if datetime.now().date() != self.current_date:
            old_current_date = self.current_date
            self.current_date = datetime.now().date()
            for key in self.open_files:
                name, ext = os.path.splitext(key)
                archive_file_name = '%s.%s%s' % (name, old_current_date, ext)

                opened_file = self.open_files.get(key, None)
                if opened_file:
                    opened_file.close()
                if os.path.exists(archive_file_name):
                    with open(archive_file_name, 'a') as fp, open(key, 'r') as temp:
                        fp.write(temp.read())
                else:
                    os.rename(key, archive_file_name)

                self.open_files[key] = open(key, 'a')

            if day is not None:
                cleanup(day, file_name)
        return file_name in self.open_files
Example #7
 def scrape_taiwan_data(self):
     print("Starting taiwan scrape")
     country = "Taiwan"
     r = requests.get(
         "https://services7.arcgis.com/HYXUMO0l0lNCIifa/arcgis/rest/services"
         "/Wuhan_Coronavirus_Taiwan_County/FeatureServer/0/query?f=json&where=Confirmed%3E0"
         "&returnGeometry=false&spatialRel=esriSpatialRelIntersects&outFields=*&orderByFields=Confirmed"
         "%20desc&resultOffset=0&resultRecordCount=25&cacheHint=true")
     response = r.json()
     cleanup(taiwan_csv)
     for feature in response["features"]:
         attributes = feature["attributes"]
         city = attributes["COUNTYENG"]
         infected = attributes["Confirmed"]
         recovered = attributes["recover"]
         deaths = attributes["death"]
         latitude, longitude = self.geojson_service.get_lat_long(
             country, city)
         self.write_to_file(country=country,
                            region=city,
                            infected=infected,
                            recoveries=recovered,
                            deaths=deaths,
                            long=longitude,
                            lat=latitude,
                            filename=taiwan_csv)
     move_to_final(taiwan_csv)
Example #8
    def main(self, tries=0):

        try:
            media_file_path, title = self.choose_content()

            if not media_file_path:
                tries -= 1
                if tries > 0:
                    logger.warning("Trying again...")
                    return self.main(tries=tries)

                # Try to get the thumbnail.
                media_file_path, title = self.get_djvu(just_thumbnail=True)

            logger.info("The winner is... %s, %s" % (media_file_path, title))

            if not self.dry_run:
                twitter_poster = TwitterPoster(self.config)
                twitter_poster.put_media_to_timeline(media_file_path, title)
                cleanup(self.config)

        except (Exception, APIException, ConverterException) as e:
            # Catch any exception and try n times until you get a result.
            tb = traceback.format_exc()
            logger.error("Caught exception: %s \n %s" % (e, tb))
            logger.warning("Trying again...")
            cleanup(self.config)
            tries -= 1
            if tries > 0:
                self.main(tries=tries)
Example #9
def main():
    print("Static HTML file browser for Dropbox")

    parser = argparse.ArgumentParser()

    parser.add_argument(
        "location", help="path to the Public folder of your Dropbox folder.")

    group = parser.add_mutually_exclusive_group()
    group.add_argument(
        "-i",
        "--install",
        action="store_true",
        help="prepares your Dropbox folder by copying icons to the specified "
             "directory. This directory can be set up in the config.py "
             "configuration file.")
    group.add_argument(
        "--clean",
        action="store_true",
        help="cleans your Dropbox directory by deleting index.html files.")

    args = parser.parse_args()

    if args.install:
        utils.install(args.location)
        exit(0)

    if args.clean:
        utils.cleanup(args.location)
        exit(0)

    create_index_html(args.location)
Example #10
 def process_data_for_malaysia(self):
     try:
         a = WikipediaService(url="https://en.m.wikipedia.org/wiki/2020_coronavirus_pandemic_in_Malaysia")
         country = "Malaysia"
         table_text = "District/City"
         start_row = "3"
         table = a.search_table(table_text)
         if start_row.isnumeric():
             table = table[int(start_row) - 1:]
         res = []
         for row in table:
             region = self.get_city_name(row[0], row[1])
             if 'Unknown' in region or 'Imported' in region:
                 continue
             infected = row[-1]
             d = dict(
                 region=region,
                 infected=infected,
                 deaths="0",
                 recoveries="0",
             )
             res.append(d)
         file_name = self.get_output_file(country)
         cleanup(file_name)
         self.write_output_for_country(res, country=country, file_name=file_name)
         move_to_final(file_name)
     except Exception as e:
         print(f"Exception fetching malaysia data: {e}")
Example #11
def main(args):
    zipfilepath = args.zip
    if zipfilepath is None:
        print("pass arguments correctly!")
        exit(-1)
    xmlfilepath = args.xmlfile
    zip_path = zipfilepath
    if utils.valid_file(zip_path) is not True:
        print "bad zip"
        exit(-1)
    data_for_all_files = []
    path_to_extract = utils.random_temp_path(TEMP_DIR)
    utils.extractor(zip_path, path_to_extract)
    list_of_all_files = utils.getListOfFiles(path_to_extract)
    
    for path_to_file in list_of_all_files:
        uid = utils.get_uuid()
        filename = utils.stripfilepath(path_to_file)
        rel_path = utils.get_relative_path(path_to_file, path_to_extract)
        md5hash = utils.md5sum(path_to_file)
        filesize = utils.get_file_size(filepath=path_to_file)
        data = FileDetails(file_uuid=uid, file_name=filename, file_full_path=path_to_file, relative_path=rel_path, file_md5hash=md5hash, file_size=filesize)
        data_for_all_files.append(data)
    
    XS.XMLSerialize(data_for_all_files, xmlfilepath)
    utils.cleanup(path_to_extract)
    exit(0)
Example #12
 def process_data_for_myanmar(self):
     try:
         a = WikipediaService(url="https://en.wikipedia.org/wiki/2020_coronavirus_pandemic_in_Myanmar")
         country = "Myanmar"
         table_text = "Confirmed COVID-19 cases by Township"
         start_row = "3"
         table = a.search_table(table_text, index=1)
         if start_row.isnumeric():
             table = table[int(start_row) - 1:]
         res = []
         for row in table:
             region = self.get_city_name(row[1], row[2])
             if 'Total' in region:
                 continue
             infected = row[3]
             deaths = row[-1]
             d = dict(
                 region=region,
                 infected=sanitize_digit(infected),
                 deaths=sanitize_digit(deaths),
                 recoveries="0",
             )
             res.append(d)
         file_name = self.get_output_file(country)
         cleanup(file_name)
         self.write_output_for_country(res, country=country, file_name=file_name)
         move_to_final(file_name)
     except Exception as e:
         print(f"Exception fetching myanmar data: {e}")
Example #13
def main():
    print("Static HTML file browser for Dropbox")

    parser = argparse.ArgumentParser()

    parser.add_argument("location",
                        help="path to the Public folder of your Dropbox folder.")

    group = parser.add_mutually_exclusive_group()
    group.add_argument("-i", "--install",
                       action="store_true",
                       help="prepares your Dropbox folder by copying icons to the specified directory.\
                             This directory can be set up in config.py configuration file.")
    group.add_argument("--clean",
                       action="store_true",
                       help="cleans your Dropbox directory by deleting index.html files.")

    args = parser.parse_args()

    if args.install:
        utils.install(args.location)
        exit(0)

    if args.clean:
        utils.cleanup(args.location)
        exit(0)

    create_index_html(args.location)
Example #14
    def test_cleanup_redis(self):
        self.redis_conn.set("mykey", 5)
        cleanup(self.redis_conn)

        # Correctly flushes Redis
        assert self.redis_conn.get("mykey") is None
        # Correctly saves data to a pickle
        assert os.path.exists("redisData.pkl")
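A cleanup implementation consistent with this test would snapshot the Redis contents to redisData.pkl before flushing. This is a hypothetical sketch, not the project's actual code:

import pickle

def cleanup(redis_conn):
    # Hypothetical: dump every key/value to a pickle, then flush Redis,
    # matching the two assertions in the test above.
    data = {key: redis_conn.get(key) for key in redis_conn.keys()}
    with open("redisData.pkl", "wb") as f:
        pickle.dump(data, f)
    redis_conn.flushdb()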
Example #15
def grade_data(controller_path, link_costs, pass_stats, pass_file, pass_fail, expected_path_success, expected_path_fail, init_ttl_success, init_ttl_fail, src, dst, cfg):
    score = 0.0
    cntrl_port = utils.random_port()
    remote_api.init_remote_assignment(cfg, cntrl_port)
    ROUTERS, ROUTER_ID_MAPPING = utils.gen_topology(link_costs, cntrl_port, scramble=True)

    init_ttl = init_ttl_success
    transfer_id = random.randint(1,100)
    init_seq_num = random.randint(1,100)
    filename = 'testfile1'

    # Move file on src router
    remote_api.copy_file_to(cfg, utils.get_router_ip(int(ROUTER_ID_MAPPING[src]), ROUTERS), filename, filename)

    # Remove file on dst router (if it exists)
    remote_api.delete_file_from(cfg, utils.get_router_ip(int(ROUTER_ID_MAPPING[dst]), ROUTERS), 'file-*')

    # INIT
    utils.run_cmd([controller_path, '-t', 'topology', '-i', '1', '-o', 'response.pkt'])
    os.system('rm response.pkt*') #cleanup
    sleep(2)

    # Do File Transfer
    remote_api.run_cmd(cfg, utils.get_router_ip(int(ROUTER_ID_MAPPING[src]), ROUTERS), 'chmod 777 '+filename)
    utils.run_cmd([controller_path, '-t', 'topology', '-f', ROUTER_ID_MAPPING[src], ROUTER_ID_MAPPING[dst], str(init_ttl), str(transfer_id), str(init_seq_num), filename, '-o', 'response.pkt'])
    sleep(8)

    # Checks File stats/meta
    expected_path = expected_path_success
    if check_file_transfer(controller_path, init_ttl, transfer_id, init_seq_num, filename, expected_path, ROUTER_ID_MAPPING):
        score += pass_stats
    # Check File itself
    if compare_files(cfg, filename, utils.get_router_ip(int(ROUTER_ID_MAPPING[dst]), ROUTERS), 'file-' + str(transfer_id)):
        score += pass_file

    init_ttl = init_ttl_fail
    transfer_id = random.randint(1,100)
    init_seq_num = random.randint(1,100)
    filename = 'testfile2'

    # Move file on src router
    remote_api.copy_file_to(cfg, utils.get_router_ip(int(ROUTER_ID_MAPPING[src]), ROUTERS), filename, filename)

    # Remove file on dst router (if it exists)
    remote_api.delete_file_from(cfg, utils.get_router_ip(int(ROUTER_ID_MAPPING[dst]), ROUTERS), 'file-*')

    # Do File Transfer
    remote_api.run_cmd(cfg, utils.get_router_ip(int(ROUTER_ID_MAPPING[src]), ROUTERS), 'chmod 777 '+filename)
    utils.run_cmd([controller_path, '-t', 'topology', '-f', ROUTER_ID_MAPPING[src], ROUTER_ID_MAPPING[dst], str(init_ttl), str(transfer_id), str(init_seq_num), filename, '-o', 'response.pkt'])
    sleep(8)

    # Checks File stats/meta and File Transfer failure
    expected_path = expected_path_fail
    if check_file_transfer(controller_path, init_ttl, transfer_id, init_seq_num, filename, expected_path, ROUTER_ID_MAPPING) and not (compare_files(cfg, filename, utils.get_router_ip(int(ROUTER_ID_MAPPING[dst]), ROUTERS), 'file-'+str(transfer_id))):
        score += pass_fail

    remote_api.cleanup(cfg)
    utils.cleanup()
    return score
Example #16
def example_three():
    """ git rebase -i changing order of commits """

    text = """\nExample #3: Interactive Rebasing: changing order\n
            In this example, we'll perform an interactive rebase
            to change the order of the commits.\n
           """
    print(color_it(text, 'white'))
    input(color_it('\nPress enter to continue...', 'yellow'))
    print(color_it("\nLet's look at our git log\n", 'white'))
    call_repo_command(['git', 'log'])
    text = """
            Okay. So now we decide it makes more sense for the second change
            to come before the first change. This is a rather contrived
            example but, let's see how it works.
            """
    print(color_it(text, 'white'))
    input(color_it('\nPress enter to continue...', 'yellow'))
    text = """
           We need to specify at what point in history we want to start
           our rebase. We can specify a commit hash or use the HEAD
           pointer. We'll go back two commits by running:\n
           git rebase -i HEAD~2
           """
    print(color_it(text, 'white'))
    input(color_it('\nPress enter to continue...', 'yellow'))
    text = """
           The interactive rebase session will open your configured
           editor. To change the order simply copy/paste the lines in
           the order you'd like to have them and save.
           The changes will be applied from top to bottom.
         """
    print(color_it(text, 'white'))
    input(color_it('\nPress enter to start rebasing...', 'yellow'))
    print(
        color_it("\nRunning command: ", 'red') +
        color_it('git rebase -i HEAD~2'))
    time.sleep(1)
    subprocess.call(['git', 'rebase', '-i', 'HEAD~2'], cwd='repo')
    input(color_it('\nPress enter to check the git log...', 'yellow'))
    call_repo_command(['git', 'log'])
    print(color_it("\nDid you change the order? Cool.", 'white'))
    print(
        color_it(
            "\nYou are like Dr. Who: travelling through time and doing good.",
            'white'))
    time.sleep(1)
    print(color_it("\nThat's the end of example 3"))
    key = input(
        color_it("\nPress enter to move to the next example or 'q' to quit",
                 'yellow'))
    if key == 'q':
        print(color_it("Cleaning up...."))
        cleanup()
        sys.exit()
    else:
        call_example(4)
Example #17
 def scrape_john_hopkins_data(self, date_str=None):
     print("Starting john hopkins scrape")
     excluded_countries = ["China"]
     countries = ["Australia", "Canada", "China"]
     curr_date = pendulum.now()
     processed = False
     if not date_str:
         date_str = curr_date.strftime("%m-%d-%Y")
     while not processed:
         print(f"Trying {date_str}...")
         r = requests.get(
             "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data"
             f"/csse_covid_19_daily_reports/{date_str}.csv")
         if r.status_code == 200:
             processed = True
         else:
             print(
                 f"Error! Status Code: {r.status_code}. Data still not updated."
             )
             # Fall back to the previous day's report and retry.
             curr_date = curr_date.subtract(days=1)
             date_str = curr_date.strftime("%m-%d-%Y")
     if r.status_code == 200:
         req = StringIO(r.text)
         df = pd.read_csv(req).replace(np.nan, '', regex=True)
         df = df[df["Country_Region"].isin(countries)]
     else:
         print(r)
         return
     cleanup(hopkins_csv)
     for index, row in df.iterrows():
         country = row["Country_Region"]
         if country in excluded_countries:
             continue
         city = row["Admin2"]
         region = row[
             "Province_State"] if not city else f'{city}, {row["Province_State"]}'
         confirmed = row["Confirmed"]
         deaths = row["Deaths"]
         recoveries = row["Recovered"]
         active = row["Active"]
         latitude = row["Lat"]
         longitude = row["Long_"]
         if "Unassigned" in region:
             continue
         self.write_to_file(country=country,
                            region=region,
                            infected=confirmed,
                            deaths=deaths,
                            recoveries=recoveries,
                            long=longitude,
                            lat=latitude,
                            filename=hopkins_csv)
     move_to_final(hopkins_csv)
     return df
Example #18
def example_four():
    """ git rebase -i squash commits """

    text = """\nExample #4: Interactive Rebasing: SQUASHING!\n
            In this example, we'll combine the contents of two commits
            into one commit using git rebase -i.
           """
    print(color_it(text, 'white'))
    input(color_it('\nPress enter to continue...', 'yellow'))
    text = """
            Okay, let's say we decided that the last two commits are
            very closely related and they should really be squashed
            together. We'll run git rebase -i again to 'squash' the
            commits together.
           """
    print(color_it(text, 'white'))
    input(color_it('\nPress enter to continue...', 'yellow'))
    text = """
            We'll go back two commits again, but this time we'll 'pick'
            the first commit and squash the second into it. When the
            interactive rebase is opened in your editor, change the word
            'pick' to 'squash' in front of the commit you wish to
            combine into the previous one.\n
            You will be given an opportunity to alter your commit
            message for the combined commits.
            Remember the changes will be applied from top to bottom.
           """
    print(color_it(text, 'white'))
    input(color_it('\nPress enter to SQUASH...', 'yellow'))
    print(
        color_it("\nRunning command: ", 'red') +
        color_it('git rebase -i HEAD~2'))
    time.sleep(1)
    subprocess.call(['git', 'rebase', '-i', 'HEAD~2'], cwd='repo')
    input(color_it("\nLet's check the git log. Press enter...", 'yellow'))
    call_repo_command(['git', 'log'])
    print(
        color_it(
            "\nThere should just be two commits now. Did you combine the commits? Coolness.",
            'white'))
    input(color_it('\nPress enter to continue...', 'yellow'))
    text = """WARNING\n
           Remember to never change history that has already been
           consumed by others. For example you should not change the
           history of commits that have already been merged into a
           master or develop branch because other developers may have
           based their own changes off of this history. This will cause
           very bad headaches.
           Be a responsible time traveller."""
    print(color_it(text, 'red'))
    time.sleep(1)
    print(color_it("\nThat's currently the end of the walkthrough"))
    input(color_it('Press enter to cleanup and exit...', 'yellow'))
    print(color_it("Cleaning up...."))
    cleanup()
    sys.exit()
Example #19
 def process_data_for_usa(self):
     try:
         wms = WorldoMeterService()
         country = "USA"
         records = wms.get_us_stats()
         file_name = self.get_output_file(country)
         cleanup(file_name)
         self.write_output_for_country(records, country=country, file_name=file_name)
         move_to_final(file_name)
     except Exception as e:
         print(f"Exception fetching usa data: {e}")
Example #20
def logout(request):
    cleanup(request)
    auth.logout(request)
    #request.session.set_test_cookie()
    form = AuthForm()
    t = get_template('registration/login.html')
    ts = get_template('theme_select.html')
    loginform = t.render(RequestContext(request, {'form': form}))
    themeselect = ts.render(RequestContext(request))
    return json_result({'success': 1,
                        'loginform': loginform,
                        'themeselect': themeselect})
Example #21
 def process_global_stats(self):
     try:
         wms = WorldoMeterService()
         country = "global"
         records = wms.get_global_stats()
         file_name = self.get_output_file(country)
         cleanup(file_name)
         self.global_stats = records
         self.write_output_for_country(records, file_name=file_name)
         move_to_final(file_name)
     except Exception as e:
         print(f"Exception fetching global data: {e}")
Example #22
def predict():

    # upload all images in request to folder on server
    filepaths = upload(request, UPLOAD_FOLDER)

    # parse and resize images then get prediction
    parsed = readresize(filepaths)
    result = getprediction(parsed)

    # cleanup and return result
    cleanup(filepaths)
    return jsonify({'prediction': result.tolist()})
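Here cleanup most plausibly just deletes the uploaded images once the prediction has been computed; a hedged sketch (the actual helper is not shown):

import os

def cleanup(filepaths):
    # Hypothetical helper: remove each uploaded file, ignoring ones
    # that have already disappeared.
    for path in filepaths:
        try:
            os.remove(path)
        except OSError:
            pass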
Example #23
    def __init__(self, query, delay, epochs, group):
        threading.Thread.__init__(self)
        cleanup()
        self.query = query
        self.delay = delay
        self.epochs = epochs
        self.group = group

        self.threads = {}
        self.results = {}
        self.queries = {}
        self.split_query()
Example #24
def build(source, build_dir, root, initiator):
    """Builds the register in build_dir/source.

    source is either settings.register_path or settings.staging_path.
    """

    logging.info("Sync script started by %s...", initiator)

    # candidate for removal as this is the only place it is used
    standards_id, standards = load_repos(repos_path)

    # TODO: move to utils
    clusters_id = {}
    with open(cluster_path) as f:
        clusters = load(f)

        for cluster in clusters:
            clusters_id[cluster["id"]] = cluster

    # check if initiator is present in repos.json
    if initiator in standards_id.keys():
        cleanup(build_path, source, build_dir, initiator)

        logging.info("Fetching repo %s..." % initiator)
        fetch_repo(root, source, initiator, standards_id[initiator]["url"], build_path)

        logging.info("Building folders...")
        build_folders(source, build_dir, standards_id[initiator], root, standards_id[initiator]["cluster"], build_path)
        create_infomodel_homepage(
            root,
            source,
            assets_path,
            build_path,
            build_dir,
            standards_id[initiator]["cluster"],
            standards_id[initiator],
        )

        logging.info("Creating homepagepage...")
        webpages.create_register_homepage(clusters, source, build_dir)

        if standards_id[initiator]["cluster"] != "":
            webpages.create_cluster_overview(
                standards, source, build_dir, standards_id[initiator]["cluster"], root, assets_path
            )
    else:
        print "%s is not listed in repos.json... aborting." % initiator
        logging.error("%s is not listed in repos.json... aborting" % initiator)
        exit()
        # TODO: check if repo needs to be removed from repos/

    print "Done!"
Example #25
def test_start_from_args_simple_args(requires_tmp):
    """Test executing start_from_args with no arguments.

    Will pass if,
        A) It returns a Logger without Exception.
        B) Logger logs to file correctly
    """
    tmp_file = 'test/tmp/test.log'
    log = conlog.start(level='INFO', log_file=tmp_file)
    log.info('Testing')
    message = '     INFO -                 root - Testing'
    assert utils.read_log_entry(tmp_file) == message
    utils.cleanup(log, tmp_file)  # Cleanup log file
Example #26
def example_four():
    """ git rebase -i squash commits """

    text = """\nExample #4: Interactive Rebasing: SQUASHING!\n
            In this example, we'll combine the contents of two commits
            into one commit using git rebase -i.
           """
    print(color_it(text, 'white'))
    input(color_it('\nPress enter to continue...', 'yellow'))
    text = """
            Okay, let's say we decided that the last two commits are
            very closely related and they should really be squashed
            together. We'll run git rebase -i again to 'squash' the
            commits together.
           """
    print(color_it(text, 'white'))
    input(color_it('\nPress enter to continue...', 'yellow'))
    text = """
            We'll go back two commits again, but this time we'll 'pick'
            the first commit and squash the second into it. When the
            interactive rebase is opened in your editor, change the word
            'pick' to 'squash' in front of the commit you wish to
            combine into the previous one.\n
            You will be given an opportunity to alter your commit
            message for the combined commits.
            Remember the changes will be applied from top to bottom.
           """
    print(color_it(text, 'white'))
    input(color_it('\nPress enter to SQUASH...', 'yellow'))
    print(color_it("\nRunning command: ", 'red') + color_it('git rebase -i HEAD~2'))
    time.sleep(1)
    subprocess.call(['git', 'rebase', '-i', 'HEAD~2'], cwd='repo')
    input(color_it("\nLet's check the git log. Press enter...", 'yellow'))
    call_repo_command(['git', 'log'])
    print(color_it("\nThere should just be two commits now. Did you combine the commits? Coolness.", 'white'))
    input(color_it('\nPress enter to continue...', 'yellow'))
    text = """WARNING\n
           Remember to never change history that has already been
           consumed by others. For example you should not change the
           history of commits that have already been merged into a
           master or develop branch because other developers may have
           based their own changes off of this history. This will cause
           very bad headaches.
           Be a responsible time traveller."""
    print(color_it(text, 'red'))
    time.sleep(1)
    print(color_it("\nThat's currently the end of the walkthrough"))
    input(color_it('Press enter to cleanup and exit...', 'yellow'))
    print(color_it("Cleaning up...."))
    cleanup()
    sys.exit()
Example #27
def example_three():
    """ git rebase -i changing order of commits """

    text = """\nExample #3: Interactive Rebasing: changing order\n
            In this example, we'll perform an interactive rebase
            to change the order of the commits.\n
           """
    print(color_it(text, 'white'))
    input(color_it('\nPress enter to continue...', 'yellow'))
    print(color_it("\nLet's look at our git log\n", 'white'))
    call_repo_command(['git', 'log'])
    text = """
            Okay. So now we decide it makes more sense for the second change
            to come before the first change. This is a rather contrived
            example but, let's see how it works.
            """
    print(color_it(text, 'white'))
    input(color_it('\nPress enter to continue...', 'yellow'))
    text = """
           We need to specify at what point in history we want to start
           our rebase. We can specify a commit hash or use the HEAD
           pointer. We'll go back two commits by running:\n
           git rebase -i HEAD~2
           """
    print(color_it(text, 'white'))
    input(color_it('\nPress enter to continue...', 'yellow'))
    text = """
           The interactive rebase session will open your configured
           editor. To change the order simply copy/paste the lines in
           the order you'd like to have them and save.
           The changes will be applied from top to bottom.
         """
    print(color_it(text, 'white'))
    input(color_it('\nPress enter to start rebasing...', 'yellow'))
    print(color_it("\nRunning command: ", 'red') + color_it('git rebase -i HEAD~2'))
    time.sleep(1)
    subprocess.call(['git', 'rebase', '-i', 'HEAD~2'], cwd='repo')
    input(color_it('\nPress enter to check the git log...', 'yellow'))
    call_repo_command(['git', 'log'])
    print(color_it("\nDid you change the order? Cool.", 'white'))
    print(color_it("\nYou are like Dr. Who: travelling through time and doing good.", 'white'))
    time.sleep(1)
    print(color_it("\nThat's the end of example 3"))
    key = input(color_it("\nPress enter to move to the next example or 'q' to quit",
                         'yellow'))
    if key == 'q':
        print(color_it("Cleaning up...."))
        cleanup()
        sys.exit()
    else:
        call_example(4)
Example #28
    def get_tf_cam_world(self, load_tf=False):

        try:
            if not load_tf:
                M = self.__rigid_transform_3D()
                print(M)
                if self.save_tf:
                    self.__save(M)
            else:
                M = self.__load()

        except Exception as err:
            traceback.print_exc(file=sys.stdout)
            utils.cleanup()
Example #29
    def test_cleanup_images(self):
        # create an empty file
        with open("../EDA_miner/static/images/python_generated_ssid_file.txt", "w") as f:
            f.write("hello")

        cleanup(self.r)

        # get all files
        images = os.listdir(os.path.abspath("../EDA_miner/static/images"))
        # keep only files that belong to non-registered users
        non_user_images = [img for img in images
                           if "python_generated_ssid" in img]

        assert len(non_user_images) == 0
Example #30
def main():
    if sys.argv[1] == '-g':
        gen_parts(int(sys.argv[2]), sys.argv[3], int(sys.argv[4]),
                  int(sys.argv[5]))
    elif sys.argv[1] == '-c':
        idx_parts = [int(idx) for idx in sys.argv[2:]]
        if len(set(idx_parts)) != len(idx_parts):
            print('ERROR: duplicate indices in the index array')
            return
        parts = [read_struct('part_{}.txt'.format(idx)) for idx in idx_parts]
        check_secret(read('p.txt'), parts)
    elif sys.argv[1] == '-clean':
        cleanup()
    else:
        print('ERROR: invalid operation code')
Example #31
def test_start_from_yaml_complex(requires_tmp):
    """Test executing start_from_yaml with a complex YAML
    configuration.

    Will pass if,
        A) It returns a Logger without Exception.
        B) Logger logs to file correctly
    """
    tmp_file = 'test/tmp/test.log'  # Specified in test/yaml/complex.yml
    log = conlog.start(yaml_file='test/yaml/complex.yml')
    log.info('Testing')
    #
    # TODO: Add log output validation
    #
    utils.cleanup(log, tmp_file)
Example #32
 def from_json(cls, xkcd_json):
     xkcd_json["transcript"] = utils.cleanup(xkcd_json["transcript"])
     return cls(
         content=xkcd_json["transcript"],
         title=xkcd_json["title"],
         link=xkcd_json["img"],
     )
Example #33
def write_to_bq(pubsub, sub_name, bigquery):
    """Write the data to BigQuery in small chunks."""
    tweets = []
    CHUNK = 50  # The size of the BigQuery insertion batch.
    # If no data on the subscription, the time to sleep in seconds
    # before checking again.
    WAIT = 2
    tweet = None
    mtweet = None
    while True:
        while len(tweets) < CHUNK:
            twmessages = pull_messages(pubsub, PROJECT_ID, sub_name)
            if twmessages:
                for res in twmessages:
                    try:
                        tweet = json.loads(res)
                    except Exception as bqe:
                        print(bqe)
                        continue
                    # First do some massaging of the raw data
                    mtweet = utils.cleanup(tweet)
                    # We only want to write tweets to BigQuery; we'll skip
                    # 'delete' and 'limit' information.
                    if 'delete' in mtweet:
                        continue
                    if 'limit' in mtweet:
                        print(mtweet)
                        continue
                    tweets.append(mtweet)
            else:
                # pause before checking again
                print('sleeping...')
                time.sleep(WAIT)
        utils.bq_data_insert(bigquery, PROJECT_ID, os.environ['BQ_DATASET'],
                             os.environ['BQ_TABLE'], tweets)
        tweets = []
Example #34
def profile():
    emailid = request.form['emailid'].strip()
    validated = validate(emailid)
    if not validated:
        return render_template("sorry.html")
    else:
        try:
            redis_server = redis.Redis(connection_pool=POOL)
            if not redis_server.exists(emailid):
                payload = {
                    'key': API_KEY,
                    'person_email': emailid,
                }
                response = requests.get(API, params=payload, verify=False)
                if response.status_code == 200:
                    data = json.loads(response.text)
                    if data.get('profile').get('status').get(
                            'has_person_data'):
                        cleaned_data = cleanup(data)
                        redis_server.set(emailid, json.dumps(cleaned_data))
                        redis_server.bgsave()
                else:
                    return render_template("sorry.html")
            try:
                data = json.loads(redis_server.get(emailid))
            except ValueError:
                data = literal_eval(redis_server.get(emailid))
            if data.get('profile').get('status').get('has_person_data', False):
                return render_template("data.html",
                                       user=data['profile']['person_data'])
            else:
                return render_template("sorry.html")
        except (requests.exceptions.ConnectionError, TypeError):
            return render_template("sorry.html")
Example #35
 def test_utils_cleanup(self):
     test_file = '/tmp/' + utils.test_name()
     self.assertFalse(os.path.exists(test_file))
     with utils.cleanup(['rm', test_file]):
         utils.run(['touch', test_file])
         self.assertTrue(os.path.exists(test_file))
     self.assertFalse(os.path.exists(test_file))
Example #36
 def test_glance_manager_mocked_ok_double(self):
     cln = ['glance', '--os-image-api-version', '1', 'image-delete',
            'GLANCE_MANAGER_CIRROS_TESTING_IMAGE']
     with utils.cleanup(cln):
         locpath = get_local_path('..', 'gm_list.txt')
         ret = glance_manager.main(['-v', '-l', locpath, '-l', locpath])
         self.assertTrue(ret)
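Examples #35 and #36 use cleanup as a context manager that runs a shell command on exit no matter how the body finishes. A minimal sketch of such a helper, assuming it shells out the same way utils.run does:

import subprocess
from contextlib import contextmanager

@contextmanager
def cleanup(cmd):
    # Hypothetical: yield to the body, then always run cmd
    # (e.g. ['rm', path]), even if the body raised.
    try:
        yield
    finally:
        subprocess.call(cmd)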
Example #39
def write_to_bq(bigquery):
    """Write the data to BigQuery in small chunks."""
    tweets = []
    CHUNK = 50  # The size of the BigQuery insertion batch.
    twstring = ''
    tweet = None
    mtweet = None
    while True:
        while len(tweets) < CHUNK:
            # We'll use a blocking list pop -- it returns when there is
            # new data.
            res = r.brpop(REDIS_LIST)
            twstring = res[1]
            try:
                tweet = json.loads(res[1])
            except Exception as bqe:
                print(bqe)
                continue
            # First do some massaging of the raw data
            mtweet = utils.cleanup(tweet)
            # We only want to write tweets to BigQuery; we'll skip 'delete' and
            # 'limit' information.
            if 'delete' in mtweet:
                continue
            if 'limit' in mtweet:
                print(mtweet)
                continue
            tweets.append(mtweet)
        # try to insert the tweets into bigquery
        utils.bq_data_insert(bigquery, PROJECT_ID, os.environ['BQ_DATASET'],
                             os.environ['BQ_TABLE'], tweets)
        tweets = []
Example #40
def write_text(cur, file_id, file_path, label, text):
    text = cleanup(text)
    text = normalize(text)
    cur.execute('INSERT INTO Files ('
                'file_id, file_path, label_ids, text) VALUES '
                '({}, "{}", "{}", "{}")'.format(file_id, file_path, label,
                                                text))
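String-formatting values into the INSERT statement breaks on embedded quotes and invites SQL injection; a parameterized variant (shown with sqlite3-style ? placeholders, an assumption about the driver in use) avoids both problems:

def write_text(cur, file_id, file_path, label, text):
    text = normalize(cleanup(text))
    # The driver handles quoting and escaping of the bound values.
    cur.execute('INSERT INTO Files (file_id, file_path, label_ids, text) '
                'VALUES (?, ?, ?, ?)', (file_id, file_path, label, text))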
Example #41
    def post(self):
        errors = []

        us_title = self.request_truncated('title', STORY_LENGTH)
        slug, errors = Story.make_unique_slug(us_title, errors)
        title = cleanup(us_title)

        content = self.request_clean('content')
        if not content:
            errors.append(error['content'])

        if not errors:
            user = users.get_current_user()
            human, collection = human_and_collection_from_user(user)

            story = Story(
                key_name = slug,
                title = title,
                content = db.Text(content),
                rand_id = random(),
                author_name = human.nickname)

            collection.stories.append(slug)
            db.put([story, collection])
            self.redirect(story.url())
        else:
            self.generate('edit_story.html', {
                'errors': errors,
                'title': title,
                'content': content,
                'access': True,
                })
Example #42
 def post(self, human, slug):
     errors = []
     slug = self.request.get('slug')
     collection = Collection.get_by_key_name(slug, parent = human)
     us_title = self.request_truncated('title', COL_LENGTH)
     slug = slugify(us_title)
     title = cleanup(us_title)
     action = self.request.get('action')
     if action not in ['Save Collection Title', 'Delete Collection']:
         self.error(403)
         return
     if not collection or not collection.belongs_to_current_user():
         errors.append(error['collection access'])
     elif collection.key().name() == 'main-collection':
         errors.append(error['main collection'])
     if action == "Save Collection Title":
         if not title:
             errors.append(error['title'])
         elif not slug:
             errors.append(error['slug'])
     if not errors:
         if action == "Delete Collection":
              main = Collection.get_by_key_name('main-collection', parent=human)
             main.stories.extend(collection.stories)
             human.collections.remove(collection.key().name())
             db.put([main, human])
             collection.delete()
             self.redirect('/You')
             return
         else:
             if slug == collection.key().name():
                 self.redirect('/You')
                 return
             existing_col = Collection.get_by_key_name(slug, parent=human)
             if not existing_col:
                 new_col = Collection(
                     parent = human,
                     key_name = slug,
                     title = title,
                     stories = collection.stories)
                 human.collections.append(slug)
                 human.collections.remove(collection.key().name())
                 db.put([new_col, human])
                 collection.delete()
                 self.redirect('/You')
             else:
                 existing_col.stories.extend(collection.stories)
                 human.collections.remove(collection.key().name())
                 db.put([existing_col, human])
                 collection.delete()
                 self.redirect('/You')
     else: #errors
         self.generate('edit_collection.html', {
             'errors': errors,
             'slug': slug,
             'title': title,
             })
Example #43
def build_register(initiator):
    """Builds the register in build_dir/sources_path."""

    root = OSFS(root_path)

    logging.info("Sync script started by %s...", initiator)

    # candidate for removal as this is the only place it is used
    standards_id, standards = load_repos(repos_path)

    #TODO: move to utils
    clusters_id = {}
    with open(cluster_path) as f:
        clusters = load(f)

        for cluster in clusters:
            clusters_id[cluster['id']] = cluster

    # TODO: move to run.py
    if initiator in standards_id.keys():
        cleanup(initiator)
        
        logging.info("Fetching repo %s..." % initiator)
        fetch_repo(root, initiator, standards_id[initiator]['url'])

        # create_zipfile(initiator, root)
        
        logging.info("Building folders...")
        build_folders(standards_id[initiator], root, standards_id[initiator]['cluster'])
        
        create_infomodel_homepage(root, standards_id[initiator]['cluster'], standards_id[initiator])

        logging.info("Creating homepagepage...")
        webpages.create_register_homepage(clusters)

        if standards_id[initiator]['cluster'] != "":
            webpages.create_cluster_overview(standards, standards_id[initiator]['cluster'], root)
    else:
        print "%s is not listed in repos.json... aborting." % initiator
        logging.error("%s is not listed in repos.json... aborting" % initiator)
        exit()
        #TODO: check if repo needs to be removed from repos/

    print "Done!"
Example #44
def build_staging(source, destination_temp, destination):
    set_repeat('none')

    cleanup(source, destination_temp)

    root = OSFS('./') # 'c:\Users\<login name>' on Windows
    # root.makedir(source, allow_recreate=True)
    root.makedir(destination_temp, allow_recreate=True)

    # TODO: use this approach to include standards that are not managed on GitHub
    #standards = OSFS(source).listdir(dirs_only=True)
    with open('repos-dev.json') as f:
        standards = load(f)
    
    backend.fetch_repos(root, destination_temp, standards, source)
    backend.build_folders(source, destination_temp, standards, root)
    webpages.create_overview_page(standards, source, destination_temp)
    backend.create_staging(destination_temp, destination)
    
    print "Done!"
Example #45
def write_to_bq(bigquery):
    """Write the data to BigQuery in small chunks."""
    tweets = []
    CHUNK = 50  # The size of the BigQuery insertion batch.
    tweet = None
    mtweet = None
    count = 0
    count_max = 50000
    redis_errors = 0
    allowed_redis_errors = 3
    while count < count_max:
        while len(tweets) < CHUNK:
            # We'll use a blocking list pop -- it returns when there is
            # new data.
            res = None
            try:
                res = r.brpop(REDIS_LIST)
            except Exception:
                print('Problem getting data from Redis.')
                redis_errors += 1
                if redis_errors > allowed_redis_errors:
                    print("Too many redis errors: exiting.")
                    return
                continue
            try:
                tweet = json.loads(res[1])
            except Exception as e:
                print(e)
                redis_errors += 1
                if redis_errors > allowed_redis_errors:
                    print("Too many redis-related errors: exiting.")
                    return
                continue
            # First do some massaging of the raw data
            mtweet = utils.cleanup(tweet)
            # We only want to write tweets to BigQuery; we'll skip 'delete' and
            # 'limit' information.
            if 'delete' in mtweet:
                continue
            if 'limit' in mtweet:
                continue
            tweets.append(mtweet)
        # try to insert the tweets into bigquery
        response = utils.bq_data_insert(bigquery, PROJECT_ID, os.environ['BQ_DATASET'],
                             os.environ['BQ_TABLE'], tweets)
        tweets = []
        count += 1
        if count % 25 == 0:
            print ("processing count: %s of %s at %s: %s" %
                   (count, count_max, datetime.datetime.now(), response))
Example #46
    def post(self, human):
        stories_new_string = self.request_clean('new')
        stories_new = stories_new_string.split(',')
        us_title_new = self.request_truncated('new_title', COL_LENGTH)
        slug_new = slugify(us_title_new)
        title_new = cleanup(us_title_new)
        if not slug_new:
            title_new = "Collection Title"
            slug_new = "collection-title"

        collections_current = human.get_collections() #Collection instances
        lists_of_stories = (c.stories for c in collections_current)
        all_stories = set(itertools.chain(*lists_of_stories))

        all_stories_new = [] #list of story slugs
        updated_collections = [] #list of Collection instances
        for collection in collections_current:
            stories_string = self.request_clean(collection.key().name())
            stories = stories_string.split(',')
            valid_stories = [s for s in stories if s in all_stories]
            collection.stories = valid_stories
            updated_collections.append(collection)
            all_stories_new.extend(valid_stories)

        valid_stories_new = [s for s in stories_new if s in all_stories]
        all_stories_new.extend(valid_stories_new)
        #why check for missing stories? in case something goes wrong!!
        missing_stories = all_stories.difference(all_stories_new)
        #main collection gets missing stories:
        updated_collections[0].stories.extend(missing_stories) 

        if valid_stories_new:
            if slug_new not in human.collections:
                human.collections.append(slug_new)
                collection_new  = Collection(
                    parent = human,
                    key_name = slug_new,
                    title = title_new,
                    stories = valid_stories_new)
                updated_collections.extend([human, collection_new])
                db.put(updated_collections)
            else:
                index = human.collections.index(slug_new)
                updated_collections[index].stories.extend(valid_stories_new)
                db.put(updated_collections)
        else:
            db.put(updated_collections)
    
        self.redirect('/You')
Example #47
 def test_utils_fileio_read(self):
     test_file = 'test.txt'
     utils.run(['touch', test_file])
     with utils.cleanup(['rm', test_file]):
         with open(test_file, 'w+b') as out:
             self.assertEqual(b'', out.read())
             out.write(b'TOTO')
             self.assertEqual(b'', out.read())
             out.seek(0)
             self.assertEqual(b'TOTO', out.read())
             out.seek(0, os.SEEK_END)
             out.write(b'TITI')
             self.assertEqual(b'', out.read())
             out.seek(0)
             self.assertEqual(b'TOTOTITI', out.read())
             out.seek(0)
             out.write(b'TITI')
             self.assertEqual(b'TITI', out.read())
             out.seek(0)
             self.assertEqual(b'TITITITI', out.read())
         self.assertTrue(out.closed)
Example #48
def example_one():
    '''Example for git commit --amend'''

    text = """\nExample #1: Amending a commit\n
            In this example, we'll modify a file, stage the file,
            commit the changes and then make further changes. These
            changes are related to the first commit and should just be
            lumped in there. We'll add the file again and run:
            git commit --amend to alter our last commit. We'll see later
            how git commit --amend is really just a special case of git
            rebase -i.
           """
    print(color_it(text, 'white'))
    input(color_it('Press enter to continue...', 'yellow'))
    text = "\nFirst, we'll make some changes to the README and save 'em...."
    print(color_it(text, 'white'))
    change_file('README.md', "The time is now: "+ time.strftime('%c',
                                                                time.localtime()))
    print(color_it("\nNow we'll check our git status..."))
    input(color_it('\nPress enter to continue...', 'yellow'))
    call_repo_command(['git', 'status'], wait=True)
    print(color_it("\nNow let's add the file"))
    time.sleep(1)
    call_repo_command(['git', 'add', 'README.md'], wait=True)
    print(color_it("\nWe'll commit that file; go ahead and add a commit message"))
    input(color_it('\nPress enter to commit...', 'yellow'))
    print(color_it("\nRunning command: ", 'red') + color_it('git commit'))
    time.sleep(1)
    subprocess.call(['git', 'commit'], cwd='repo')
    print(color_it("\nCool. Let's look at our git log..."))
    input(color_it('Press enter to continue...', 'yellow'))
    call_repo_command(['git', 'log'])
    print(color_it("There's our first commit"))
    input(color_it('\nPress enter to continue...', 'yellow'))
    print(color_it("\nRad, now let's make another change..."))
    change_file('README.md', "READ ME! The time is now: "+ time.strftime('%c',
                                                                time.localtime()))
    input(color_it('\nPress enter to continue...', 'yellow'))
    print(color_it("\nNow let's add the file again"))
    call_repo_command(['git', 'add', 'README.md'], wait=True)
    print(color_it("\nAnd this time we'll use git commit --amend\n"))
    print(color_it("Change your message if you like."))
    input(color_it('\nPress enter to commit...', 'yellow'))
    print(color_it("\nRunning command: ", 'red') + color_it('git commit --amend'))
    time.sleep(1)
    subprocess.call(['git', 'commit', '--amend'], cwd='repo')
    print(color_it("\nLet's look at our git log..."))
    input(color_it('\nPress enter to continue...', 'yellow'))
    call_repo_command(['git', 'log'])
    print(color_it("\nAnd we see just the one commit"))
    print(color_it("git commit --amend is cool!"))
    time.sleep(0.5)
    print(color_it("\nThat's the end of example 1"))
    key = input(color_it("Press enter to move to the next example or 'q' to quit",
                         'yellow'))
    if key == 'q':
        print(color_it("Cleaning up..."))
        cleanup()
        sys.exit()
    else:
        call_example(2)
Example #49
 def test_glancing_image_import_noname(self):
     name, _ = os.path.splitext(os.path.basename(self._TTYLINUX_FILE))
     with cleanup(glance.glance_delete, name):
         self.assertTrue(glancing.main(['-f',
             self._TTYLINUX_FILE, '-s', self._TTYLINUX_MD5]))
Example #50
def example_two():
    """ rebase edit """

    text = """\nExample #2: Interactive Rebasing: editing\n
              let's edit a commit message with rebase -i.
           """
    print(color_it(text, 'white'))
    input(color_it('\nPress enter to continue...', 'yellow'))
    text = "\nFirst, we'll make some changes to file1.txt and save 'em...."
    print(color_it(text, 'white'))
    change_file('file1.txt', "This change is cool")
    input(color_it('\nPress enter to continue...', 'yellow'))
    print(color_it("Now let's add the file"))
    call_repo_command(['git', 'add', 'file1.txt'], wait=True)
    print(color_it("And we'll commit that file; go ahead and add a commit message"))
    input(color_it('\nPress enter to commit...', 'yellow'))
    print(color_it("\nRunning command: ", 'red') + color_it('git commit'))
    time.sleep(1)
    subprocess.call(['git', 'commit'], cwd='repo')
    print(color_it("\nGreat. Let's look at our git log..."))
    input(color_it('\nPress enter to continue...', 'yellow'))
    call_repo_command(['git', 'log'])
    print(color_it("Cool, now let's make another change..."))
    change_file('file2.txt', "This change is also cool")
    input(color_it('\nPress enter to continue...', 'yellow'))
    print(color_it("\nNow let's add the file"))
    call_repo_command(['git', 'add', 'file2.txt'], wait=True)
    print(color_it("\nAnd we'll commit that file; go ahead and add a commit message"))
    input(color_it('\nPress enter to commit...', 'yellow'))
    print(color_it("\nRunning command: ", 'red') + color_it('git commit'))
    time.sleep(1)
    subprocess.call(['git', 'commit'], cwd='repo')
    print(color_it("\nLet's look at our git log again...", 'yellow'))
    input(color_it('\nPress enter to continue...', 'yellow'))
    call_repo_command(['git', 'log'])
    input(color_it('\nPress enter to continue...', 'yellow'))
    text = """
            We're going to run an interactive rebase using git rebase -i.\n
            Rebasing essentially rewrites history by rewinding to a
            certain point in the git history and then playing commits
            'on top' of that point. With an interactive rebase you can
            alter the commits before they are applied.
           """
    print(color_it(text, 'white'))
    text = """
           We need to specify at what point in history we want to start
           our rebase. We can specify a commit hash or use the HEAD
           pointer. We'll go back two commits by running:\n
           git rebase -i HEAD~2
           """
    print(color_it(text, 'white'))
    input(color_it('Press enter to continue...', 'yellow'))
    text = """
           The interactive rebase session will open your configured
           editor. Each line is a separate commit on which you can
           perform certain operations (there are even nice instructions
           included). Change the word 'pick' to 'edit' and save.
           The rebase will rewind and then let you amend your commit.
           You will be given an opportunity to edit the commit message.
         """
    print(color_it(text, 'white'))
    input(color_it('Press enter to start rebasing...', 'yellow'))
    print(color_it("Running command: ", 'red') + color_it('git rebase -i HEAD~2'))
    time.sleep(1)
    subprocess.call(['git', 'rebase', '-i', 'HEAD~2'], cwd='repo')
    print(color_it("We'll go ahead and amend that commit...", 'white'))
    print(color_it("Running command: ", 'red') + color_it('git commit --amend'))
    time.sleep(1)
    subprocess.call(['git', 'commit', '--amend'], cwd='repo')
    text = """\nNow we'll finish the rebase using:
        git rebase --continue"""
    print(color_it(text, 'white'))
    input(color_it('\nPress enter to continue...', 'yellow'))
    print(color_it("\nRunning command: ", 'red') + color_it('git rebase --continue'))
    call_repo_command(['git', 'rebase', '--continue'])
    print(color_it("Neat", 'white'))
    input(color_it('Press enter to check the git log...', 'yellow'))
    call_repo_command(['git', 'log'])
    text = """
        Wait a second. That was just like git commit --amend except we
        were able to travel further back in time.
        """
    print(color_it(text, 'white'))
    print(color_it("You're like a time traveler or something.", 'white'))
    time.sleep(1)
    print(color_it("\nThat's the end of example 2"))
    key = input(color_it("Press enter to move to the next example or 'q' to quit",
                         'yellow'))
    if key == 'q':
        print(color_it("Cleaning up...."))
        cleanup()
        sys.exit()
    else:
        call_example(3)
Example #51
 def test_glancing_url_import_no_name(self):
     name, _ = os.path.splitext(os.path.basename(self._CIRROS_URL))
     with cleanup(glance.glance_delete, name):
         self.assertTrue(glancing.main([self._CIRROS_URL]))
Example #52
 def wrapped(self, *f_args, **f_kwargs):
     with cleanup(glance.glance_delete, name or f.__name__):
         f(self, *f_args, **f_kwargs)
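In Examples #49, #51, and #52, cleanup instead takes a callable plus its arguments and invokes it on exit. A hedged sketch of that variant:

from contextlib import contextmanager

@contextmanager
def cleanup(func, *args):
    # Hypothetical: run the body, then always call func(*args),
    # e.g. glance.glance_delete(name).
    try:
        yield
    finally:
        func(*args)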
Example #53
 def request_clean(self, input_name, max_length=None):
     if max_length:
         return cleanup(self.request.get(input_name))[:max_length]
     else:
         return cleanup(self.request.get(input_name))
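Unlike the resource-management helpers above, this cleanup is a text sanitizer for request input. A hypothetical implementation consistent with how request_clean uses it:

import html

def cleanup(text):
    # Hypothetical: collapse runs of whitespace and escape HTML before
    # the value is stored or rendered.
    return html.escape(' '.join(text.split()))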
Example #54
        crawler_p = mp.Process(target=CrawlerProcess.run,
                               args=(crawler, request_q, result_q))
        crawler_p.start()  # start() returns None, so keep the Process reference
        ext_port += 1
        
    # Wait for all children to die. Exit when they are all done. This is also
    #     important because the dead children will only join() when this is
    #     called. 
    # TODO (nchachra): This should be called in case of exceptions in main too.
    while len(mp.active_children()) != 0:
        logger.info("Request q size: %s" % request_q.qsize())
        logger.info("Result q size: %s" % result_q.qsize())
        if (len(mp.active_children()) == 1 and request_q.qsize() == 1 
                and result_q.qsize() == 0):
            task = request_q.get()
            if task == "CLEANUP":
                # Removing this final task will cause the qcontroller process
                #    to join. Finally there will be 0 child processes left, 
                #    and main will join as well.
                logger.info("Only the q manager seems to be attempting " +
                            "to join q. Joining in main and exiting.")
                request_q.task_done()
        logger.debug("Active processes: %s " % mp.active_children())
        time.sleep(10)
    logger.critical("Final cleanup.")
    utils.cleanup(logger)
    logger.critical("Byebye!")


if __name__ == '__main__':
    run()
Example #55
def main():
    cleanup()
    introduction()
    setup_repo()
    rebase.call_example(1)