Example #1
import sys

from progress.spinner import MoonSpinner


def build_file_db(start_dir, extensions):
    """
    return a database containing info for each file under the
    starting path/directory

    :param extensions: iterable of (lowercase, dot-prefixed) file extensions to include
    :param start_dir: directory in which to start
    :type start_dir: str
    :return: dict mapping each file hash to a list of info dicts for files with that hash
    :rtype: dict
    """

    if not extensions:
        sys.exit('@@@ERROR valid extension list not passed to build_file_db')

    db = {}

    spinner = MoonSpinner(
        'Working ')  # cli spinner to indicate something is happening

    for p in get_file_list(
            start_dir
    ):  # loop over all the Paths (files) in the hierarchy starting at start_dir

        # got a file (not a dir) and filename has an extension of interest
        if p.is_file() and p.suffix.lower() in extensions:

            pstring = str(p)  # get the Path in string form

            xh = get_file_hash(p)

            # first time seeing this file
            if xh not in db:

                db[xh] = [{
                    'path': pstring,
                    'extension': p.suffix.lower(),
                }]

            # found a likely alternate of an existing file; append it to that hash's list
            else:

                db[xh].append({
                    'path': pstring,
                    'extension': p.suffix.lower(),
                })

        spinner.next()

    print('\n')

    return db
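
This function depends on two helpers, get_file_list and get_file_hash, that are not shown on this page. A minimal sketch of what they might look like, assuming pathlib traversal and SHA-256 content hashing (names and details here are assumptions, not the original implementations):

import hashlib
from pathlib import Path


def get_file_list(start_dir):
    # yield every Path (files and directories) under start_dir, recursively
    return Path(start_dir).rglob('*')


def get_file_hash(path, chunk_size=65536):
    # hash the file contents in chunks so large files are not read into memory at once
    h = hashlib.sha256()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(chunk_size), b''):
            h.update(chunk)
    return h.hexdigest()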
Example #2
import time

from progress.spinner import MoonSpinner


def get_file_extensions(start_dir):
    """
    return the set of unique file extensions of files under the given
    starting path/directory

    todo: track the count of each extension type

    :param start_dir: directory in which to start
    :type start_dir: str
    :return: the set of unique (lowercased) file extensions found
    :rtype: set
    """

    exts = set()

    spinner = MoonSpinner(
        'Working ')  # cli spinner to indicate something is happening

    start = time.time()

    for p in get_file_list(start_dir):

        if p.is_file():

            exts.add(p.suffix.lower())

        spinner.next()

    duration = time.time() - start

    print('\n')

    print('Completed looking for unique file extensions')
    print(
        f'found {len(exts)} extensions in: {time.strftime("%H:%M:%S", time.gmtime(duration))}'
    )
    print(f'Extensions found: {exts}')

    return exts
Example #3
import asyncio

from progress.spinner import MoonSpinner


async def run_bar(message, timeout, server_started, interval=0.1):
    bar = MoonSpinner(message)
    spent = 0
    while spent < timeout and not server_started.done():
        bar.next()
        await asyncio.sleep(interval)
        spent += interval
    bar.finish()
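
A minimal sketch of how run_bar might be driven, assuming server_started is an asyncio.Future that some startup coroutine resolves (fake_server is a hypothetical stand-in for the real work):

async def fake_server(server_started):
    # hypothetical stand-in for real startup work; resolves the future when done
    await asyncio.sleep(1.5)
    server_started.set_result(True)


async def main():
    loop = asyncio.get_running_loop()
    server_started = loop.create_future()
    await asyncio.gather(
        fake_server(server_started),
        run_bar('Starting server ', timeout=10, server_started=server_started),
    )


asyncio.run(main())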
Example #4
import requests
from progress.spinner import MoonSpinner


def main():
    status_code = '200'
    path = ''
    thmflag = []
    host = 'http://10.10.169.100:3000/'
    with MoonSpinner('Processing...') as bar:
        while path != 'end':
            response = requests.get(host + path)
            status_code = response.status_code
            json_response = response.json()
            path = str(json_response['next'])
            flag_value = str(json_response['value'])
            if flag_value != 'end':
                thmflag.append(flag_value)
            bar.next()
    finalflag = "".join(thmflag)
    print(f'Flag: {finalflag}')
Example #5
    def _wait_saving(self):
        with MoonSpinner('Saving your device image: ') as bar:
            elapsed_time = 0

            while self.image.status == 'saving':
                bar.next()
                sleep(.25)
                elapsed_time += .25
                if elapsed_time % 3 == 0:
                    self.image.refresh()
            bar.finish()
        if self.image.status == 'ready':
            print('Your image has been saved')
            return
        if 'error' in self.image.status:
            print(
                "There was an error saving you instance please contact F0cal")
            exit(1)
Example #6
def u_convlstm_trainer():
    train_dataset, test_dataset = train_input_fn()
    optimizer = tf.keras.optimizers.Adam(learning_rate=hp.lr)
    model = UConvlstm(hp)
    model_loss = Loss(model)

    checkpoint_file = hp.ckpt
    if checkpoint_file == '':
        checkpoint_file = 'uconvlstm-ckp_0'
    else:
        model.load_weights(f'{hp.single_gpu_model_dir}/{checkpoint_file}')

    logger.add(
        f"{hp.logdir}/{hp.in_seqlen}_{hp.out_seqlen}_{hp.lead_time}_train.log",
        enqueue=True)

    for epoch in range(hp.num_epochs):
        for step, (x_batch_train, y_batch_train) in enumerate(train_dataset):
            start = time.perf_counter()
            with tf.GradientTape() as tape:
                y_predict = model(x_batch_train, training=True)
                print("y_pred:", y_predict.shape)
                print("y_batch:", y_batch_train.shape)
                loss_ssim, loss_l2, loss_l1, loss = model_loss(
                    [y_predict, y_batch_train])
            grads = tape.gradient(loss, model.trainable_weights)
            optimizer.apply_gradients(zip(grads, model.trainable_weights))
            elapsed = (time.perf_counter() - start)
            template = ("step {} loss is {:1.5f}, "
                        "loss ssim is {:1.5f}, "
                        "loss l2 is {:1.5f}, "
                        "loss l1 is {:1.5f}."
                        "({:1.2f}s/step)")
            logger.info(
                template.format(step, loss.numpy(), loss_ssim.numpy(),
                                loss_l2.numpy(), loss_l1.numpy(), elapsed))

        if epoch % hp.num_epoch_record == 0:
            loss_test = 0
            loss_ssim_test = 0
            loss_l2_test = 0
            loss_l1_test = 0
            count = 0
            spinner = MoonSpinner('Testing ')
            for step, (x_batch_test, y_batch_test) in enumerate(test_dataset):
                y_predict = model(x_batch_test, training=False)
                loss_ssim, loss_l2, loss_l1, loss = model_loss(
                    [y_predict, y_batch_test])
                loss_ssim_test += loss_ssim.numpy()
                loss_l2_test += loss_l2.numpy()
                loss_l1_test += loss_l1.numpy()
                loss_test += loss.numpy()
                count += 1
                spinner.next()
            spinner.finish()
            logger.info("TEST COMPLETE!")
            template = ("TEST DATASET STATISTICS: "
                        "loss is {:1.5f}, "
                        "loss ssim is {:1.5f}, "
                        "loss l2 is {:1.5f}, "
                        "loss l1 is {:1.5f}.")
            logger.info(
                template.format(loss_test / count, loss_ssim_test / count,
                                loss_l2_test / count, loss_l1_test / count))

            total_epoch = int(re.findall(r"\d+", checkpoint_file)[0])
            checkpoint_file = checkpoint_file.replace(f'_{total_epoch}',
                                                      f'_{total_epoch + 1}')
            model.save_weights(f'{hp.single_gpu_model_dir}/{checkpoint_file}',
                               save_format='tf')
            logger.info("Saved checkpoint_file {}".format(checkpoint_file))
Example #7
password = input("Enter the administrative password: "******"password": password})

if connectASpace.status_code == 200:
    print("Successfully connected to the ASpace backend!")
    sessionID = connectASpace.json()["session"]
    headers = {'X-ArchivesSpace-Session': sessionID}
else:
    print(connectASpace.status_code)

ethno = load_pickled('./data/ethno.txt')

spinner = MoonSpinner('loading the resource trees...')
state = 'loading'

ao_ids = []
while state != 'FINISHED':
    for i in ethno:
        if 'Crocodile' not in i['title']:
            tree = get_item(i['uri'] + '/tree')
            if tree['children'] != []:
                for child in tree['children']:
                    ao_ids.append(child['id'])
                    spinner.next()
            else:
                spinner.next()
        else:
            spinner.next()
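
The helpers get_item and load_pickled are not shown in this snippet. A minimal sketch under the assumption that load_pickled unpickles a file and get_item GETs JSON from the ASpace backend using the session headers defined above (backend_url is a hypothetical placeholder):

import pickle

import requests

backend_url = 'http://localhost:8089'  # hypothetical ArchivesSpace backend address


def load_pickled(path):
    # load a previously pickled object (here, the list of resource records)
    with open(path, 'rb') as fh:
        return pickle.load(fh)


def get_item(uri):
    # GET a JSON resource from the backend, authenticating with the session header
    response = requests.get(backend_url + uri, headers=headers)
    return response.json()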
Example #8
import cv2, os, base64
from Pipeline import GripPipeline
from yattag import Doc
from progress.spinner import MoonSpinner
gp1 = GripPipeline(True)
gp2 = GripPipeline(False)
returns = {}
report = {}
spinner = MoonSpinner("Generating coverage report... ")
for i in os.listdir("test_images"):
    cap = cv2.imread("test_images/" + i)
    spinner.next()
    if i.startswith("red_"):
        spinner.next()
        gp2.process(cap)
        spinner.next()
        returns[i] = gp2.filter_contours_output
    else:
        spinner.next()
        gp1.process(cap)
        spinner.next()
        returns[i] = gp1.filter_contours_output
for x, y in returns.items():
    if y == []:
        spinner.next()
        report[x] = [
            False, "data:image/jpeg;base64," + str(
                base64.b64encode(
                    cv2.imencode(".jpg", cv2.imread("test_images/" + x))[1]),
                "utf-8")
        ]
Example #9
from time import sleep
from progress.spinner import MoonSpinner

with MoonSpinner('Processing ...') as bar:
    for i in range(100):
        sleep(0.04)
        bar.next()
Example #10
import sys
import time

from progress.bar import IncrementalBar
from progress.spinner import LineSpinner, MoonSpinner

state = 'Running'
spinner = LineSpinner('Running ')
counter = 0
while state != 'FINISHED':
    # Do some work
    time.sleep(.1)
    spinner.next()
    counter += 1
    if counter > 20:
        state = 'FINISHED'
sys.stdout.write('\b' * 11)
sys.stdout.flush()
sys.stdout.write('\r ')
print("Finished")
state = 'Running'
spinner = MoonSpinner('Running ')
counter = 0
while state != 'FINISHED':
    # Do some work
    time.sleep(.1)
    spinner.next()
    counter += 1
    if counter > 20:
        state = 'FINISHED'
sys.stdout.write('\b' * 11)
sys.stdout.flush()
sys.stdout.write('\r ')
print("Finished")
iCount = 0
bar = IncrementalBar('Migrating process e_gdbgra',
                     max=10)
Example #11
    def __init__(self):
        self.progress_bar = MoonSpinner('Calculating page ranks')
Example #12
class PageRank:

    def __init__(self):
        self.progress_bar = MoonSpinner('Calculating page ranks')

    def pageRank(self):
        sourceDirectory = settings.PAGERANK_RESOURCE_DIRECTORY
        destDirectory = PAGERANK_DESTINATION_DIRECTORY
        docs = []
        id2index = {}
        # print('start read files')
        # read files
        for file in map(lambda x: os.path.join(sourceDirectory, x), list_files(sourceDirectory, '*.json')):
            with open(file, 'r') as readFile:
                doc = json.load(readFile)
            id2index[doc['id']] = len(docs)
            self.progress_bar.next()
            docs.append(doc)
        # print('start calc page rank')
        # create links matrix
        n = len(docs)
        p = []
        for doc in docs:
            pp = [0] * n
            for linkID in filter(lambda x: x in id2index.keys(), (set(doc['cited_in']) | set(doc['refrences']))):
                pp[id2index[linkID]] = 1
            p.append(pp)

        # calculate page rank
        pr = self.pageRankMathCalculation(p, PAGERANK_ALFA, PAGERANK_ERROR)

        # print('start save files')
        # save docs
        os.makedirs(destDirectory, exist_ok=True)
        for doc, pagerank in zip(docs, pr):
            doc['pageRank'] = pagerank
            file_name = '{}.json'.format(doc['id'])
            with open(os.path.join(destDirectory, file_name), 'w') as outfile:
                json.dump(doc, outfile)
        # print('end page rank')

    def pageRankMathCalculation(self, p, alfa, error):
        n = len(p)
        sum = np.sum(p, axis=1)
        for i in range(0, n):
            if sum[i] == 0:
                p[i] = np.repeat(1 / n, n)
            else:
                p[i] = np.divide(p[i], sum[i])
        v1 = np.repeat(1 / n, n)
        v = np.tile(v1, [n, 1])

        p = np.add(np.dot(p, (1 - alfa)), np.dot(v, alfa))
        x = np.zeros(n)
        x[0] = 1
        step = 0
        while True:
            step += 1
            pervx = x
            x = np.dot(x, p)
            self.progress_bar.next()
            if self.calcError(pervx, x) < error:
                self.progress_bar.finish()
                break

        # print('end step = '+ step.__str__())
        return x

    def calcError(self, perv, new):
        sum = 0
        for i in range(0, len(new)):
            sum += abs(new[i] - perv[i])
        return sum
Example #13
                     "loss l1 is {:1.5f}."
                     "({:1.2f}s/step)")
         logger.info(
             template.format(step, loss.numpy(), loss_ssim.numpy(),
                             loss_l2.numpy(), loss_l1.numpy(), elapsed))
 if epoch % hp.num_epoch_record == 0:
     total_test = 0
     for step, (x_batch_test,
                ys_batch_test) in enumerate(test_dist_dataset):
         total_test += 1
     loss_test = 0
     loss_ssim_test = 0
     loss_l2_test = 0
     loss_l1_test = 0
     count = 0
     spinner = MoonSpinner('Testing ')
     for step, (x_batch_test,
                ys_batch_test) in enumerate(test_dist_dataset):
         if step < total_test - 1:
             loss_ssim, loss_l2, loss_l1, loss = distributed_step(
                 x_batch_test, ys_batch_test, model, flag='test')
             loss_ssim_test += loss_ssim.numpy()
             loss_l2_test += loss_l2.numpy()
             loss_l1_test += loss_l1.numpy()
             loss_test += loss.numpy()
             count += 1
         spinner.next()
     spinner.finish()
     logger.info("TEST COMPLETE!")
     template = ("TEST DATASET STATISTICS: "
                 "loss is {:1.5f}, "
Example #14
import asyncio
from datetime import datetime

from progress.spinner import MoonSpinner

import environment as env
from CommonTools.FileTools import CsvFileTools
from Mining.Miner.TwitterLogin import TwitterConnection
from Mining.TwitterQueryMakers.TwitterQueries import TweetsGetter
from Server.ClientSide.Clients import Client
from Server.ServerTools.Helpers import convert_object_into_dict, decode_payload
from Server.ServerTools.Routes import TWEET_ROUTE

search_terms_file = "%s/tweet-search-terms.csv" % env.EXPERIMENTS_FOLDER

spinner = MoonSpinner()

# How often to update the terminal
NOTICE_LIMIT = 5000


# def make_tweet_searcher(credentials_file):
#     """"Utility to make it easier to recreate a new connection
#      if the original connection gets remotely reset
#      """
#     # Create a connection
#     connection = TwitterConnection( credentials_file )
#
#     # create the object which will execute searches
#     return TweetsGetter( connection )
async def run():
Example #15
class OrmSaveQueue:
    spinner = MoonSpinner()

    def __init__( self, batch_size=environment.DB_QUEUE_SIZE ):
        # number of times save is called
        self._queryCount = 0
        # number of items actually saved
        self._saveCount = 0
        # number of items attempted to save
        self._saveAttemptCount = 0
        # number invalid tweets
        self._invalidCount = 0
        # number of updated tweets
        self._updatedCount = 0
        # number of users whose data has been updated from tweets
        self._usersUpdatedCount = 0

        self.batch_size = batch_size
        self.store = deque()

    def increment_query_count( self ):
        # increment the notification spinner
        type( self ).spinner.next()
        # add to the stored request count.
        self._queryCount += 1

    @gen.coroutine
    def enque( self, modelList: list, session=None ):
        """
        Push a list of users into the queue for saving to
        the db. Once the batch size has been reached,
        it will be saved.
        The session is an instance of a sqlalchemy session
        :param session:
        :type modelList: list
        """
        with (yield lock.acquire()):
            # Push the model objects into the queue
            # we need to use the lock so that no other
            # instance gets in the way
            for r in modelList:
                self.store.append( r )

        # if we've reached the batch size, we save them to the db
        # needs to be greater in case hit limit in middle of list
        if len( self.store ) >= self.batch_size:
            yield from self.save_queued( session )

    async def save_queued( self, session ):
        """Flushes the orm objects in the queue to the database"""
        self.increment_query_count()

        async with lock:
            if len( self.store ) == 0: return True

            b = [ self.store.pop() for _ in range( 0, len( self.store ) ) ]

            for o in b:
                self._saveAttemptCount += 1
                try:
                    session.add( o )
                    session.commit()
                    self._saveCount += 1
                except sqlalchemy.exc.IntegrityError as e:
                    # print('integrity error %s' % e)
                    # The obj already exists, so we can try updating it
                    # first, we get rid of the attempted save
                    session.rollback()
                    # now try updating
                    self.update_handler(o, session)
                except sqlalchemy.exc.DatabaseError as e:
                    # print('db error %s' % e)
                    self._invalidCount += 1
                    session.rollback()
                except sqlalchemy.orm.exc.FlushError:
                    self._invalidCount += 1
                    session.rollback()

            self.record_stats()

    def record_stats( self ):
        save_rate = self._saveCount / self._saveAttemptCount

        r = [ standard_timestamp(), self._saveAttemptCount, self._saveCount, save_rate, self._invalidCount, self._updatedCount, self._usersUpdatedCount ]
        write_csv( csvlog, r )

        Logger.log( " ------------ ---------------- ------------ " )
        Logger.log( "Save attempt count %s" % self._saveAttemptCount )
        Logger.log( "Save success count %s" % self._saveCount )
        Logger.log( "Save rate          %s" % save_rate )
        Logger.log( "Invalid tweets     %s" % self._invalidCount )
        Logger.log( "Updated tweets     %s" % self._updatedCount )
        Logger.log( "Updated users     %s" % self._usersUpdatedCount )
        self.reset_counts()

    def reset_counts( self ):
        self._saveAttemptCount = 0
        self._saveCount = 0
        self._invalidCount = 0
        self._updatedCount = 0
        self._usersUpdatedCount = 0

    def update_handler( self, ormObject , session):
        """This is called when there has been an integrity error,
        which indicates that the object already exists. It determines
        what sort of object we're dealing with and dispatches the appropriate
        task to update it
        """
        # Determine what we're dealing with
        if isinstance(ormObject, Tweet):
            if update_tweet_if_changed(ormObject, session):
                self._updatedCount += 1
                # print("updated %s" % self._updatedCount)

        elif isinstance(ormObject, User):
            # This is just what the handler for tweets does.
            # Was more ambitious under TWIT-38, but not sure need
            # all the extra apparatus
            session.merge(ormObject)
            session.commit()
            self._usersUpdatedCount += 1
Example #16
def run_progress_bar(finished_event):
    with MoonSpinner('Processing...') as bar:
        while not finished_event.is_set():
            sleep(0.02)
            bar.next()
            finished_event.wait(0.2)
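
A minimal sketch of how run_progress_bar might be driven from a separate thread, assuming finished_event is a threading.Event that the main work sets when it completes:

import threading
from time import sleep

finished = threading.Event()
worker = threading.Thread(target=run_progress_bar, args=(finished,))
worker.start()

sleep(3)        # stand-in for the real work on the main thread
finished.set()  # tell the spinner loop to stop
worker.join()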
Example #17
def main():
    train_dataset, test_dataset = train_input_fn()
    optimizer = tf.keras.optimizers.Adam(learning_rate=hp.lr)
    model = UTransformer(hp)
    model_loss = Loss(model)

    best_score = float('-inf')
    not_improved_count = 0

    checkpoint_file = hp.ckpt
    if checkpoint_file == '':
        checkpoint_file = 'ckp_0'
    else:
        model.load_weights(f'{hp.single_gpu_model_dir}/{checkpoint_file}')

    logger.add(f"{hp.logdir}/cmip_train.log", enqueue=True)

    for epoch in range(hp.num_epochs):
        for step, (x_batch_train, ys_batch_train) in enumerate(train_dataset):
            start = time.perf_counter()
            with tf.GradientTape() as tape:
                y_predict = model([x_batch_train, ys_batch_train], training=True)
                loss_ssim, loss_l2, loss_l1, loss = model_loss([y_predict, ys_batch_train[1]])
            grads = tape.gradient(loss, model.trainable_weights)
            optimizer.apply_gradients(zip(grads, model.trainable_weights))
            elapsed = (time.perf_counter() - start)
            template = ("step {} loss is {:1.5f}, "
                        "loss ssim is {:1.5f}, "
                        "loss l2 is {:1.5f}, "
                        "loss l1 is {:1.5f}."
                        "({:1.2f}s/step)")
            logger.info(template.format(step, loss.numpy(), loss_ssim.numpy(), loss_l2.numpy(), loss_l1.numpy(), elapsed))

        if epoch % hp.num_epoch_record == 0:
            loss_test = 0
            loss_ssim_test = 0
            loss_l2_test = 0
            loss_l1_test = 0
            count = 0
            y_true, y_pred = [], []
            spinner = MoonSpinner('Testing ')
            for step, (x_batch_test, ys_batch_test) in enumerate(test_dataset):
                y_predict = model([x_batch_test, ys_batch_test], training=False)
                loss_ssim, loss_l2, loss_l1, loss = model_loss([y_predict, ys_batch_test[1]])
                loss_ssim_test += loss_ssim.numpy()
                loss_l2_test += loss_l2.numpy()
                loss_l1_test += loss_l1.numpy()
                loss_test += loss.numpy()
                count += 1

                y_true.append(np.array(nino_seq(ys_batch_test[1][:, :, :, :, 0])))
                y_pred.append(np.array(nino_seq(y_predict[:, :, :, :, 0])))

                spinner.next()

            y_true = tf.concat(y_true, axis=0)
            y_pred = tf.concat(y_pred, axis=0)
            sco = score(y_true, y_pred)
            if sco > best_score:
                best_score = sco
                not_improved_count = 0
                best_state = True
            else:
                not_improved_count += 1
                best_state = False

            spinner.finish()
            logger.info("TEST COMPLETE!")
            template = ("TEST DATASET STATISTICS: "
                        "loss is {:1.5f}, "
                        "loss ssim is {:1.5f}, "
                        "loss l2 is {:1.5f}, "
                        "loss l1 is {:1.5f},"
                        "acc skill score is {:1.5f}.")
            logger.info(template.format(loss_test/count, loss_ssim_test/count, loss_l2_test/count, loss_l1_test/count, sco))

            total_epoch = int(re.findall(r"\d+", checkpoint_file)[0])
            checkpoint_file = checkpoint_file.replace(f'_{total_epoch}', f'_{total_epoch + 1}')
            # if not_improved_count == hp.early_stop_patience:
            #     print("Validation performance didn\'t improve for {} epochs. "  "Training stops.".format(
            #         hp.early_stop_patience))
            #     break
            # if best_state:
            model.save_weights(f'{hp.single_gpu_model_dir}/{checkpoint_file}', save_format='tf')
            # model.save("my_model")
            logger.info("Saved checkpoint_file {}".format(checkpoint_file))
Example #18
class WordMapSaveQueue(object):
    spinner = MoonSpinner()

    def __init__(self,
                 batch_size=environment.DB_QUEUE_SIZE,
                 file_path=environment.MASTER_DB):
        self._queryCount = 0
        self.batch_size = batch_size
        self.store = deque()
        self.file_path = file_path
        self.user_query = """
        INSERT INTO word_map 
          (word, sentence_index, word_index, user_id) 
        VALUES (?, ?, ?, ?)
        """
        self.tweet_query = """
        INSERT INTO word_map 
          (word, sentence_index, word_index, tweet_id) 
        VALUES (?, ?, ?, ?)
        """
        # use the environment setting to decide which insert query we use
        self.query = self.tweet_query if environment.ITEM_TYPE == 'tweet' else self.user_query

    def increment_query_count(self):
        # increment the notification spinner
        type(self).spinner.next()
        # add to the stored request count.
        self._queryCount += 1

    @gen.coroutine
    def enqueue(self, result):
        rt = (result.text, result.sentence_index, result.word_index, result.id)
        with (yield lock.acquire()):
            self.store.appendleft(rt)
        if len(self.store) > self.batch_size:
            yield from self.save_queued()

    async def save_queued(self):
        """Saves all the items in the queue to the db
        To help with isolation levels, from https://www.sqlite.org/lang_transaction.html:
        Transactions can be deferred, immediate, or exclusive. The
        default transaction behavior is deferred. Deferred means that no locks are acquired on the database until the
        database is first accessed. Thus with a deferred transaction, the BEGIN statement itself does nothing to the
        filesystem. Locks are not acquired until the first read or write operation. The first read operation against
        a database creates a SHARED lock and the first write operation creates a RESERVED lock. Because the
        acquisition of locks is deferred until they are needed, it is possible that another thread or process could
        create a separate transaction and write to the database after the BEGIN on the current thread has executed.
        If the transaction is immediate, then RESERVED locks are acquired on all databases as soon as the BEGIN
        command is executed, without waiting for the database to be used. After a BEGIN IMMEDIATE, no other database
        connection will be able to write to the database or do a BEGIN IMMEDIATE or BEGIN EXCLUSIVE. Other processes
        can continue to read from the database, however. An exclusive transaction causes EXCLUSIVE locks to be
        acquired on all databases. After a BEGIN EXCLUSIVE, no other database connection except for read_uncommitted
        connections will be able to read the database and no other connection without exception will be able
        to write the database until the transaction is complete.
        """
        self.increment_query_count()
        async with lock:
            try:
                if environment.TIME_LOGGING:
                    timestamp_writer(
                        environment.SERVER_SAVE_TIMESTAMP_LOG_FILE)

                # create a new connection so not sharing across threads (which is not allowed)
                conn = sqlite3.connect(self.file_path,
                                       isolation_level="EXCLUSIVE")
                # wrap in a transaction so that other processes can play nice
                with conn:
                    rs = [self.store.pop() for i in range(0, len(self.store))]

                    conn.executemany(self.query, rs)

            except Exception as e:
                print("error for file %s : %s" % (self.file_path, e))