Example 1
def register(new_acc, db_query, pers_inf, param):
    """ Add new user to database. (internal)

    Args:
        - new_acc (object): user object of type ``db_query`` to be added to the database

        - db_query (class): User class, e.g. ``User``

        - pers_inf (object): Object of type personal_information.PersonalInformation

        - param (dict): Contains the user id, e.g. `` {'user_id': user_id} ``

    Returns:
        - success (bool)
        - error (dict):
            - code (int): one of
                - 6 -- Account already existing
                - 99 -- Undefined
            - message (str): error code meaning
        - response (empty dict)

        (returns ``error`` only if ``success`` is False, and ``response`` otherwise)
    """

    session = db.get_db_session()
    result = {}
    result['success'] = False
    try:
        # account already registered
        if session.query(db_query).filter_by(**param).count():
            session.close()
            Configuration.print_config()
            error = {}
            error['code'] = enums.Error.USER_ALREADY_EXISTS
            error['message'] = "account already exists"
            result['error'] = error
            return result
        else:
            session.add(new_acc)
            if pers_inf is not None:
                session.add(pers_inf)
                for code in UserDataType:
                    session.add(
                        PrivacyDefault(code.value, new_acc.user_id, 0, True))
            session.commit()
            session.close()
    except Exception:
        if Configuration.test_mode:
            logging.exception("An error occurred:")
        session.rollback()
        session.close()
        error = {}
        error['code'] = enums.Error.UNDEFINED
        error['message'] = "undefined"
        result['error'] = error
        return result
    result['success'] = True
    result['response'] = {}
    return result
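
A minimal usage sketch for the contract documented above; the ``User`` and ``PersonalInformation`` constructors and their arguments are assumptions for illustration, not part of this example's source.

new_user = User(user_id='alice')                      # hypothetical model instance
info = PersonalInformation(user_id='alice')           # hypothetical, may be None
result = register(new_user, User, info, {'user_id': 'alice'})
if result['success']:
    print(result['response'])                         # empty dict on success
else:
    print(result['error']['code'], result['error']['message'])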
Example 2
def load_config():
    try:
        return Configuration("config.toml")
    except FileNotFoundError:
        from lib.config import generate_config, write_config
        generated = generate_config()
        towrite = write_config(generated, "config.toml")
        if towrite:
            return Configuration("config.toml")
        else:
            sys.exit("Couldn't load the configuration")
Example 3
def _setup_tag_invalid_meta_data_file(confirm_nuke):
    exif = ExifEditor(Configuration())
    index = Index(TEST_INDEX_LOCATION, exif)
    core = TieCoreImpl(exif, index)
    cli.run_cmd(["cp", READ_FILE, WRITE_FILE])
    frontend = FrontendTest(confirm_nuke, [])
    tie_main.run(core, RunOptions(["tag", "foo", WRITE_FILE]), frontend)
Example 4
def make_celery(flaskapp, test_mode=False):
    """ Launch Celery. """
    cel = celery.Celery(flaskapp.import_name)
    cel.conf.update(flaskapp.config)
    if test_mode:
        Configuration.enable_test_mode()
    cel.config_from_object(Configuration)
    taskbase = cel.Task

    class ContextTask(taskbase):
        """ Create a flask context task. """
        abstract = True

        def __call__(self, *args, **kwargs):
            with flaskapp.app_context():
                return taskbase.__call__(self, *args, **kwargs)

    cel.Task = ContextTask
    return cel
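
A hedged usage sketch for this factory; the Flask app and task body are illustrative assumptions (Example 10 shows the factory wired into a real application).

from flask import Flask

flask_app = Flask(__name__)                   # hypothetical app
cel = make_celery(flask_app, test_mode=True)

@cel.task()
def ping():
    # runs inside flask_app.app_context() thanks to ContextTask
    return 'pong'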
Example 5
    def main(testmode=False, workdir=lib_folder):
        if MynedataBackendCtl.backend_process is not None:
            return
        if not Configuration.initialized:
            Configuration.initialize()

        mynedata_backend_command = 'celery worker {tasks} {workdir} {celerybeat} {testmode} {logfile} {loglevel} {autoscale}'.format(
            tasks='-A lib.backend.tasks.app -B',
            workdir='--workdir {}'.format(workdir),
            celerybeat='--schedule=/tmp/celerybeat-schedule',
            testmode='--testmode' if testmode else '',
            logfile='--logfile=/opt/mynedata/log/backend.log',
            loglevel='--loglevel=debug',
            autoscale='--autoscale %s,%s' %
            (Configuration.concurrency_max, Configuration.concurrency_min),
        )

        if testmode:
            Configuration.enable_test_mode()
        Configuration.print_config()

        MynedataBackendCtl.backend_process = subprocess.Popen(
            mynedata_backend_command, shell=True)
        with open(PID_FILE, 'w') as f_pid:
            f_pid.write(str(MynedataBackendCtl.backend_process.pid) + '\n')
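
For reference, the assembled command looks roughly like this with testmode enabled (the workdir path and autoscale bounds are illustrative placeholders):

celery worker -A lib.backend.tasks.app -B --workdir /opt/mynedata/lib --schedule=/tmp/celerybeat-schedule --testmode --logfile=/opt/mynedata/log/backend.log --loglevel=debug --autoscale 10,3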
Example 6
    def save_game(self):
        self.has_highscore = True
        if self.name_input.text:
            Configuration.name = self.name_input.text
        if Configuration.is_highscore(self.score):
            Configuration.register_highscore(self.name_input.text, self.score)
        if Configuration.is_highest_score(self.score):
            self.highscorer, self.highscore = Configuration.highest_score
        Configuration.save()
Example 7
    def __init__(self, worker, test_mode, **options):
        Configuration.initialize()
        if test_mode:
            Configuration.enable_test_mode()
        Configuration.print_config()
        worker.app.config_from_object(Configuration)
        DatabaseConnector.initialize(target=Configuration.database_uri)

        # Initialize payments
        if Configuration.payment_mode in [
                PaymentMethod.BITCOIN_DIRECT,
                PaymentMethod.BITCOIN_QUERY_BASED,
                PaymentMethod.BITCOIN_CENTRAL
        ]:
            BitcoinConnector.initialize()
Example 8
def main(argv):
    parser = OptionParser()
    options, args = parser.parse_args_dict()
    cfg = Configuration(options)

    log_level = cfg.yaml['main']['log_level']
    numeric_level = getattr(logging, log_level.upper(), None)
    if not isinstance(numeric_level, int):
        raise ValueError('Invalid log level: %s' % log_level)
    logging.basicConfig(
        filename=(cfg.yaml['main']['log_dir'] + "/mailer-" +
                  datetime.now().strftime('%Y%m%d') + ".log"),
        level=numeric_level,
        format='%(asctime)s %(levelname)s: %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S')
    logging.info('Started')

    cmds = getMail(cfg)

    for cmd in cmds:
        os.system(cmd)

    logging.info('Successfully finished')
Example 9
def parse_args(args_list: List[str]) -> Configuration:
    """Parse arguments"""
    parser = ArgumentParser(description="Minesweeper")

    parser.add_argument(
        "--size",
        "-s",
        help=
        f"Board side length, e.g. input 10 for a 10x10 board. (default {DEFAULT_BOARD_SIZE})",
        type=int,
        default=DEFAULT_BOARD_SIZE,
        dest="board_size",
    )

    parser.add_argument(
        "--bombs",
        "-b",
        help=
        f"Number of bombs to place on the board. (default {DEFAULT_NUM_BOMBS})",
        type=int,
        default=DEFAULT_NUM_BOMBS,
        dest="num_bombs",
    )

    parser.add_argument(
        "--guide",
        help="Show column and row numbers around the board.",
        action="store_true",
        dest="show_guide",
    )

    args = parser.parse_args(args_list)

    return Configuration(board_size=args.board_size,
                         num_bombs=args.num_bombs,
                         show_guide=args.show_guide)
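
A quick invocation sketch, assuming the returned Configuration exposes the fields it is constructed with:

config = parse_args(['--size', '8', '--bombs', '12', '--guide'])
print(config.board_size, config.num_bombs, config.show_guide)  # 8 12 True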
Example 10
def make_celery(flaskapp, test_mode=False):
    """ Launch Celery. """
    cel = celery.Celery(flaskapp.import_name)
    cel.conf.update(flaskapp.config)
    if test_mode:
        Configuration.enable_test_mode()
    cel.config_from_object(Configuration)
    taskbase = cel.Task

    class ContextTask(taskbase):
        """ Create a flask context task. """
        abstract = True

        def __call__(self, *args, **kwargs):
            with flaskapp.app_context():
                return taskbase.__call__(self, *args, **kwargs)

    cel.Task = ContextTask
    return cel


Configuration.initialize()
logging.basicConfig(level=logging.DEBUG)
app = connexion.App(__name__)
app.add_api('api.yaml')
CORS(app.app, origins=[Configuration.frontend_url])
app.app.config.update(
    celery_broker_url=Configuration.broker_url,
    celery_result_backend=Configuration.result_backend,
)
make_celery(app.app, test_mode=Configuration.test_mode)
application = app.app
Example 11
def main(argv):
    parser = OptionParser()
    options, args = parser.parse_args_dict()

    cfg = Configuration(options)

    logging.basicConfig(
        filename=(cfg.yaml['main']['log_dir'] + "/preprocessor-" +
                  datetime.now().strftime('%Y%m%d') + ".log"),
        level=cfg.log_level,
        format='%(asctime)s %(levelname)s: %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S')
    logging.info('Started')

    data_dir = cfg.yaml['main']['learn_dir']

    picture_dir = data_dir + "/pictures"
    tf_dirs = [
        name for name in os.listdir(picture_dir)
        if os.path.isdir(os.path.join(picture_dir, name))
    ]

    total_pp_files = 0
    new_pp_files = 0

    for d in sorted(tf_dirs):
        out_dir = data_dir + "/diff-%s/%s" % (image_size_x, d)
        cmd = "mkdir -p %s" % (out_dir)
        p = subprocess.Popen([
            cmd,
        ],
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE,
                             shell=True)
        (out, err) = p.communicate()

        in_dir = picture_dir + "/" + d
        files = [
            name for name in os.listdir(in_dir)
            if os.path.isfile(os.path.join(in_dir, name))
        ]
        it = iter(sorted(files))
        for f1, f2 in zip(it, it):
            logging.debug("Processing files %s %s", f1, f2)
            df1 = in_dir + "/" + f1
            df2 = in_dir + "/" + f2

            # black-and-white, single-channel diff
            diff_file = out_dir + "/" + ("%s-diff2.png" % f2[:-4])
            if os.path.exists(diff_file):
                logging.debug("%s already exists", diff_file)
            else:
                img1 = Image.open(df1)
                img2 = Image.open(df2)
                img1 = img1.resize((image_size_x, image_size_y),
                                   Image.ANTIALIAS)
                img2 = img2.resize((image_size_x, image_size_y),
                                   Image.ANTIALIAS)
                img1 = img1.convert('L')
                img2 = img2.convert('L')
                np_img1 = np.array(img1.getdata(), dtype=np.uint8).reshape(
                    (image_size_y, image_size_x))
                np_img2 = np.array(img2.getdata(), dtype=np.uint8).reshape(
                    (image_size_y, image_size_x))
                np_img_diff = difference_image(np_img1, np_img2)
                img_diff = Image.fromarray(np_img_diff, mode='L')
                img_diff.save(diff_file)
                logging.info("%s written", diff_file)
                #sleep(0.5)


#       # color, 3-channel diff
#       diff_file_3 = out_dir + "/" + ("%s-diffc.png" % f2[:-4])
#       if os.path.exists(diff_file_3):
#           logging.debug("%s already exists", diff_file_3)
#       else:
#           img1 = Image.open(df1)
#           img2 = Image.open(df2)
#           img1 = img1.resize((image_size_x, image_size_y), Image.ANTIALIAS)
#           img2 = img2.resize((image_size_x, image_size_y), Image.ANTIALIAS)
#           np_img1 = np.array(img1.getdata(),dtype=np.uint8).reshape((image_size_y, image_size_x, 3))
#           np_img2 = np.array(img2.getdata(),dtype=np.uint8).reshape((image_size_y, image_size_x, 3))
#           np_img_diff = difference_image(np_img1, np_img2)
#           img_diff = Image.fromarray(np_img_diff)
#           img_diff.save(diff_file_3)
#           logging.info("%s written", diff_file_3)

            pp_file = out_dir + "/" + ("%s-c%s.pp" % (f2[:-4], cluster_size))
            if os.path.exists(pp_file):
                logging.debug("%s already exists", pp_file)
            else:
                npi = read_preprocess_image(diff_file, cluster_size)
                with open(pp_file, "wb") as f:
                    npi.astype(np.uint16).tofile(f)
                logging.info("%s written", pp_file)
                new_pp_files += 1

                # sanity check: render the preprocessed file by normalizing
                # it to the 0-255 range and saving it as a PNG
                if False:
                    npi_max = np.amax(npi)
                    npi_png = npi.astype(np.float64) / npi_max
                    npi_png = npi_png * 255
                    npi_png = npi_png.astype(np.uint8)
                    png = Image.fromarray(npi_png, mode='L')
                    png.save(pp_file + ".png")

                #sleep(0.5)
            total_pp_files += 1

    logging.info("Found %s new diff files", new_pp_files)
    logging.info("Found %s total diff files", total_pp_files)

    if not new_pp_files:
        logging.info("No now diff files found")
        sys.exit(1)
Example 12
def main(argv):
    parser = OptionParser()
    options, args = parser.parse_args_dict()

    cfg = Configuration(options)

    learn_dir = cfg.yaml['main']['learn_dir']
    model_dir = cfg.yaml['main']['model_dir']
    data_dir = "%s/diff-%s" % (learn_dir, image_size_x)

    model_name = "model-d%s-c%s-1h%s-2h%s" % (image_div, cluster_size,
                                              n_hidden_1, n_hidden_2)
    print(model_name)

    # Construct model
    model = MultilayerPerceptron(n_input, n_hidden_1, n_hidden_2, n_classes)

    # Define loss function
    cost = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=model.out_layer,
            labels=y))  # label is the index of the correct output
    # Define optimizer
    global_step = tf.Variable(0, name='global_step', trainable=False)
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(
        cost, global_step=global_step)
    # TODO try other optimizers
    #optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost, global_step=global_step)

    # Initializing the variables
    init = tf.global_variables_initializer()

    # Test model
    correct_prediction = tf.equal(tf.argmax(model.out_layer, 1), y)
    # Calculate accuracy
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    # Create a saver for writing training checkpoints.
    saver = tf.train.Saver()

    # Launch the graph
    with tf.Session() as sess:
        sess.run(init)

        train_files, test_files = get_files(data_dir)
        n_train = len(train_files)
        print "reading testing data"
        start = time.time()
        test_images, test_labels = get_images_labels(test_files)
        end = time.time()
        print "testing data complete (%s s)" % (end - start)

        print "reading training data"
        start = time.time()
        train_images, train_labels = get_images_labels(train_files)
        end = time.time()
        print "training data complete (%s s)" % (end - start)

        # Training cycle
        for epoch in range(training_epochs):
            avg_cost = 0.
            index_in_epoch = 0
            total_batches = int(n_train / batch_size)

            # shuffle the images and labels together, in the same order
            # (random.random does not work correctly on a multi-dimensional np.array)
            # TODO is this actually useful?
            r = np.arange(len(train_images))
            np.random.shuffle(r)
            train_images = train_images[r]
            train_labels = train_labels[r]

            # if the data does not divide evenly into batches, use the remainder too
            if n_train > total_batches * batch_size:
                total_batches += 1
            # Loop over all batches
            for i in range(total_batches):
                _, c = sess.run(
                    [optimizer, cost],
                    feed_dict={
                        model.input_ph:
                        train_images[index_in_epoch:index_in_epoch +
                                     batch_size],
                        y:
                        train_labels[index_in_epoch:index_in_epoch +
                                     batch_size]
                    })
                # Compute average loss
                avg_cost += c / total_batches
                index_in_epoch += batch_size

            # Display logs per epoch step
            print("Epoch:", '%04d' % (epoch), "cost=", \
                "{:.9f}".format(avg_cost))
            if epoch % eval_step == 0:
                train_acc = accuracy.eval({
                    model.input_ph: train_images,
                    y: train_labels
                })
                print("Accuracy on train images:", train_acc)
                if n_test_pct:
                    test_acc = accuracy.eval({
                        model.input_ph: test_images,
                        y: test_labels
                    })
                    print("Accuracy on test images:", test_acc)
                    if test_acc == 1.0 or avg_cost < 1.0:
                        break
                elif train_acc == 1.0:
                    break
            if epoch and save_step and epoch % save_step == 0:
                saver.save(sess,
                           "%s/%s" % (learn_dir, model_name),
                           global_step=epoch)
            # deliberate, to reduce CPU load
            #sleep(0.5)
        print("Optimization Finished!")

        saver.save(sess, "%s/%s" % (model_dir, model_name))

        # TODO move this into the tester
        print("prediction mismatches in test data:")
        dt = 0
        df = 0
        mt = 0
        mf = 0
        for n, test_image in enumerate(test_images):
            cl = sess.run(tf.argmax(model.out_layer, 1),
                          feed_dict={model.input_ph: [test_image]})
            label = test_labels[n]
            if label:
                dt = dt + 1
            else:
                df = df + 1
            if cl != label:
                print "Prediction: %s, label: %s, test file: %s" % (
                    cl, label, test_files[n])
                if label:
                    mt = mt + 1
                else:
                    mf = mf + 1
        print "Model %s" % model_name
        train_acc = accuracy.eval({
            model.input_ph: train_images,
            y: train_labels
        })
        test_acc = accuracy.eval({model.input_ph: test_images, y: test_labels})
        print("Accuracy on train images:", train_acc)
        print("Accuracy on test images:", test_acc)
        print "%s/%s true and %s/%s false mismatches on test images" % (mt, dt,
                                                                        mf, df)
Example 13
def main():
    parser = OptionParser()
    parser.add_option('--once',
                      action='store_true',
                      help='start once, only to process batch')
    parser.add_option(
        '--noftp',
        action='store_true',
        help='prevent loading files from FTP server (overrides config)')
    parser.add_option('--nomail',
                      action='store_true',
                      help='prevent sending mail (overrides config)')
    options, args = parser.parse_args_dict()

    cfg = Configuration(options)
    logger = Logger(cfg.yaml)
    logger.log("Starting camera filter")

    # Construct model
    model_path = cfg.yaml['main']['model_dir'] + '/' + cfg.yaml['classifier'][
        'model_name']
    model = MultilayerPerceptron(cfg.n_input,
                                 cfg.yaml['classifier']['n_hidden_1'],
                                 cfg.yaml['classifier']['n_hidden_2'],
                                 cfg.yaml['classifier']['n_classes'])
    # Initializing the variables
    init = tf.global_variables_initializer()
    sess = tf.Session()
    sess.run(init)
    saver = tf.train.Saver()
    logger.log("Opening model %s" % model_path)
    saver.restore(sess, model_path)

    prev_hour = get_hour()
    stats_true = 0
    stats_false = 0

    if cfg.ftp_opt:
        ftp = connect_ftp(cfg.yaml)

    err_count = 0

    while err_count < 2:
        try:
            hour = get_hour()
            if prev_hour != hour:
                # logrotate
                if hour == 0:
                    logger.rotate()

                # monitoring + hourly statistics
                p = subprocess.Popen([
                    "du -sh /www/camera-filter/",
                ],
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE,
                                     shell=True)
                (out, err) = p.communicate()
                status = p.wait()
                text = (
                    "Hourly statistics:\nALARMS count: %s\nFalse alarms count: %s\n\nError count: %s\n\nTotal size of dir: %s"
                    % (stats_true, stats_false, err_count, out))
                logger.log(text)
                if cfg.mail_opt:
                    send_mail(logger, cfg.yaml, "Camera stats", text)

                stats_true = 0
                stats_false = 0
                err_count = 0
                prev_hour = hour

            if cfg.ftp_opt:
                fetch_files(ftp, cfg.yaml, logger)

            # TODO better file handling, using Python libraries
            # TODO process the batch more sensibly (do not re-read it over and over)
            # TODO what about files with a zero timestamp? (they break the photo pairing)
            p = subprocess.Popen(
                "find %s -maxdepth 1 -type f | grep ARC | grep -v diff | sort -r | head -n%s"
                % (cfg.yaml["main"]["input_dir"], cfg.input_batch_size),
                stdout=subprocess.PIPE,
                shell=True)
            (output, err) = p.communicate()
            p_status = p.wait()

            files = output.split()
            if not files:
                if cfg.once_opt:
                    # not running as a service and the batch is processed, so stop
                    break
                logger.log("no files to compare, sleeping for %s seconds.." %
                           cfg.yaml['main']['sleep_sec'])
                sleep(cfg.yaml['main']['sleep_sec'])
                continue
            if len(files) < cfg.input_batch_size:
                logger.log("less than %s files to compare, trying again.." %
                           cfg.input_batch_size)
                sleep(1)
                continue

            logger.log("%s files to compare: %s" % (len(files), files))

            if process_mp(cfg, logger, sess, model, files):
                stats_true += 1
            else:
                stats_false += 1

            sleep(1)

        except Exception as ex:
            print(ex)
            traceback.print_exc(file=stdout)
            logger.log("%s" % ex, level="ERROR")
            traceback.print_exc(file=logger.log_file)
            if cfg.mail_opt:
                try:
                    send_mail(logger,
                              cfg.yaml,
                              'Camera ALARM Error',
                              "%s" % ex,
                              notify=True)
                except Exception as ex:
                    logger.log("sending error mail failed")
                    logger.log("%s" % ex, level="ERROR")
            err_count += 1
            logger.log("error counter %s" % err_count)
            sleep(err_count)

    logger.log("Too many errors in one hour, giving up...")

    if cfg.ftp_opt:
        ftp.quit()

    logger.close()
Example 14
    def setUp(self):
        _remove_index()
        self.exif = ee.ExifEditor(Configuration())
        self.files_base_path = _path_to_linkname(os.path.abspath("../res"))
        self.index = Index(TEST_INDEX_LOCATION, self.exif)
        self.tie_core = TieCoreImpl(self.exif, self.index)
Example 15
#!/usr/bin/env python3

""" This script starts the myneData API. """

import sys
import os
from gevent import monkey

lib_folder = '/'.join(os.path.split(os.path.dirname(sys.path[0]))[:-1])
sys.path.insert(0, lib_folder)

from lib.config import Configuration
from lib.api import endpoint
monkey.patch_all()


if __name__ == '__main__':
    Configuration.initialize()
    Configuration.print_config()

    endpoint.app.debug = True
    endpoint.app.run(port=int(Configuration.api_port), server='gevent')
Example 16
class OpenDictApp(wx.App):
    """Top-level class of wxWidgets application"""

    locale = wx.Locale()

    def OnInit(self):

        _ = wx.GetTranslation
        _start = time.time()

        wx.Version = []
        try:
            wx.Version = wx.__version__
        except Exception:
            try:
                wx.Version = wx.Python.__version__
            except Exception:
                pass

        if wx.Version.split('.') < ['2', '8']:
            from lib.gui import errorwin

            title = _("wxPython Version Error")
            msg = _("wxPython %s is installed on this system.\n\n"
                    "OpenDict %s requires wxPython 2.8 or newer to run smoothly.\n\n"
                    "You can find wxPython at "
                    "http://www.wxpython.org or you can "
                    "install it using your system package manager.") \
                    % (wx.Version, info.VERSION)
            errorwin.showErrorMessage(title, msg)
            return False

        util.makeDirectories()

        systemLog(DEBUG, "Unicode version: %s" % wx.USE_UNICODE)

        # Init gettext support
        wx.Locale_AddCatalogLookupPathPrefix(
            os.path.join(info.GLOBAL_HOME, 'po'))
        self.locale.Init(wx.LANGUAGE_DEFAULT)
        self.locale.AddCatalog('opendict')

        # Data cache instance
        self.cache = {}

        # Dictionaries container
        # Mapping: name -> object
        self.dictionaries = {}

        # Failed dictionaries.
        # For error message that may be shown after creating main window
        self.invalidDictionaries = []

        self.config = Configuration()
        self.config.load()

        self.agreements = util.AgreementsManager(
            os.path.join(info.LOCAL_HOME, 'agreements.txt'))

        # Set unique ids
        for plugin in newplugin.loadDictionaryPlugins(
                self.dictionaries, self.invalidDictionaries):
            self.config.ids[wx.NewId()] = plugin.getName()

        for plain in plaindict.loadPlainDictionaries(self.dictionaries):
            self.config.ids[wx.NewId()] = plain.getName()

        for d in self.dictionaries.values():
            if not self.config.activedict.init:
                if not self.config.activedict.enabled(d.getName()):
                    d.setActive(active=False)
            else:
                # Fill up with names if not initialized yet
                self.config.activedict.add(d.getName())

        windowPos = (int(self.config.get('windowPosX')),
                     int(self.config.get('windowPosY')))
        windowSize = (int(self.config.get('windowWidth')),
                      int(self.config.get('windowHeight')))

        self.window = MainWindow(None,
                                 -1,
                                 "OpenDict",
                                 windowPos,
                                 windowSize,
                                 style=wx.DEFAULT_FRAME_STYLE)

        try:
            systemLog(INFO, "OpenDict %s" % info.VERSION)
            systemLog(INFO, "wxPython %s" % wx.Version)
            systemLog(INFO, "Global home: %s:" % info.GLOBAL_HOME)
            systemLog(INFO, "Local home: %s" % info.LOCAL_HOME)
            systemLog(DEBUG, "Loaded in %f seconds" % (time.time() - _start))
        except Exception as e:
            print("Logger Error: Unable to write to log (%s)" % e)
Example 17
        - timestamp
        - nonce
        """
        self.config.accounts[self.acc_name]['timestamp'] = str(int(
            time.time()))
        self.config.accounts[self.acc_name]['nonce'] = self.get_nonce()

    def get_nonce(self):
        """Unique token generated for each request"""
        n = base64.b64encode(''.join(
            [str(random.randint(0, 9)) for i in range(24)]).encode('utf-8'))
        return str(n.decode('utf-8'))


if __name__ == '__main__':
    folder = '/Users/peter/Workspace/FincLab/settings/'

    # Reset Program Settings
    Config.restore_to_defaults()

    # Load new configurations
    Config = Configuration(folder)

    yf = YahooOAuth(config=Config)
    print(
        yf.parse(
            'select * from yahoo.finance.historicaldata where symbol = "000058.sz" and startDate = "2015-08-26" and endDate = "2015-08-28"'
        ))
    print('current time is: ', time.time())
    print('Done!')
Example 18
    def setUp(self):
        _remove_index()
        self.index = Index(TEST_INDEX_LOCATION, ee.ExifEditor(Configuration()))
        self.files_base_path = _path_to_linkname(os.path.abspath("../res"))
Example 19
    def setUp(self):
        self.ee = exif_editor.ExifEditor(Configuration())
Example 20
import asyncio
from pprint import pformat
from azure.eventhub.aio import EventHubConsumerClient, EventHubSharedKeyCredential
from azure.eventhub.extensions.checkpointstoreblobaio import BlobCheckpointStore
from lib.config import ConsumerConfig, Configuration
from lib.storage import PostgresMessageStorageDelegate, MessageStorageDelegate
from lib.handler import MessageHandler
import logging

logging.basicConfig(
    format='%(asctime)s,%(msecs)d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',
    datefmt='%Y-%m-%d:%H:%M:%S',
    level=Configuration.log_level(),
)
logger = logging.getLogger(__name__)


async def errored(partition_context, error):
    """
    The callback function that will be called when an error is raised during receiving after retry
    attempts are exhausted, or during the process of load-balancing.

    :param azure.eventhub.PartitionContext partition_context: The EventHub partition context.
    :param error: the exception
    :return: None
    """
    if partition_context:
        logger.error("An exception: {} occurred during receiving from Partition: {}.".format(
            error, partition_context.partition_id
        ))
    else:
        logger.error("An exception: {} occurred during the load balance process.".format(error))
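
A sketch of how such an error callback is typically wired into a consumer client; the connection string, hub name, and on_event body are placeholders, not part of this example's source.

async def on_event(partition_context, event):
    # placeholder event handler
    logger.info("Event from partition %s: %s",
                partition_context.partition_id, pformat(event))
    await partition_context.update_checkpoint(event)

async def main():
    client = EventHubConsumerClient.from_connection_string(
        '<connection-string>',             # placeholder
        consumer_group='$Default',
        eventhub_name='<event-hub-name>')  # placeholder
    async with client:
        await client.receive(on_event=on_event, on_error=errored)

# asyncio.run(main())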
Example 21
    def setUp(self):
        self.frontend = FrontendTest(UserReply.yes, ["foo", "bar"])
        self.ee = ExifEditor(Configuration())