Example #1
File: pyref.py Project: luizbag/pyref
def on_abrir_menu_item_activate(self, widget):
    self.file_chooser.set_action(Gtk.FileChooserAction.OPEN)
    response = self.file_chooser.run()
    if response == Gtk.ResponseType.OK:
        self.filename = self.file_chooser.get_filename()
        models.init(self.filename)
        models.open()
        self.file_chooser.hide()
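Note: as written, the chooser is hidden only when the user clicks OK, so a cancelled dialog stays on screen (the same applies to Example #2 below). A more defensive sketch of the handler, assuming the same widget names, hides it unconditionally:

def on_abrir_menu_item_activate(self, widget):
    self.file_chooser.set_action(Gtk.FileChooserAction.OPEN)
    try:
        response = self.file_chooser.run()
        if response == Gtk.ResponseType.OK:
            self.filename = self.file_chooser.get_filename()
            models.init(self.filename)
            models.open()
    finally:
        self.file_chooser.hide()  # hide even when the user cancels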
Example #2
File: pyref.py Project: luizbag/pyref
def on_novo_menu_item_activate(self, widget):
    self.file_chooser.set_action(Gtk.FileChooserAction.SAVE)
    response = self.file_chooser.run()
    if response == Gtk.ResponseType.OK:
        self.filename = self.file_chooser.get_filename()
        models.init(self.filename)
        models.open()
        models.create_tables()
        self.file_chooser.hide()
Example #3
File: elastic.py Project: kyushy/kcing
def feed(args):
    """
    Scan last two days worth of data from KernelCI website/storage
    and send it to an ES instance, respecting its limitations
    """
    logger.info('Feeding ES')

    if not _is_es_ok():
        return -1

    if not _is_data_dir_ok():
        return -1

    models.init()

    kci = KernelCI()

    # If builds, lavas or boots are passed explicitly on the command line, skip
    # downloading the rest; otherwise we'd fetch the regular feed size (past 2 days)
    if args.builds or args.lavas or args.boots:
        builds = args.builds or {}
        lavas = args.lavas or {}
        boots = args.boots or {}
    else:
        builds = _download('build', kci.get_builds(args.how_many))
        lavas = _download('lava', kci.get_lavas(args.how_many))

        # During download, some lava files may have been switched to boot files,
        # so separate them out and filter them from the lavas dictionary
        boots = {
            _id: lavas[_id]
            for _id in lavas.keys() if 'boot' in lavas[_id]
        }
        for _id in boots.keys():
            del lavas[_id]

        # Delete old objects that are no longer needed
        models.delete_old()

    logger.info(
        'Working on %i lavas, %i builds and %i boots from KernelCI/command line'
        % (len(lavas), len(builds), len(boots)))

    saved_lavas, failed_lavas = _send_to_es('lava', lavas)
    saved_builds, failed_builds = _send_to_es('build', builds)
    saved_boots, failed_boots = _send_to_es('boot', boots)

    models.end()

    logger.info('Lavas: sent %i to ES, %i failed' %
                (len(saved_lavas), len(failed_lavas.keys())))
    logger.info('Builds: sent %i to ES, %i failed' %
                (len(saved_builds), len(failed_builds.keys())))
    logger.info('Boots: sent %i to ES, %i failed' %
                (len(saved_boots), len(failed_boots.keys())))
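The boot/lava separation above first collects the boot entries into their own dict and only then deletes them from lavas, avoiding mutating a dict while iterating it. A minimal standalone sketch of the same pattern (toy data, not KernelCI's real shape):

lavas = {'1': 'lava-job.json', '2': 'boot-arm64.json'}
boots = {_id: name for _id, name in lavas.items() if 'boot' in name}
for _id in boots:
    del lavas[_id]
print(lavas, boots)  # {'1': 'lava-job.json'} {'2': 'boot-arm64.json'}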
Example #4
def main():
    tracer.instance(routes.app)
    sess = cql.session()
    models.init(sess)
    view = graphql.GraphQLView.as_view('graphql',
                                       schema=schema.schema,
                                       middleware=[tracer.middleware],
                                       graphiql=True,
                                       graphiql_template=schema.HTML)
    routes.app.add_url_rule("/graphql", view_func=view)
    routes.app.run(use_reloader=True, threaded=True)
Example #5
File: views.py Project: badp/weather
def vote_add(request):
  if request.method != "POST":
    return HttpResponseBadRequest()

  if not models.Team.objects.count():
    models.init()

  choice = request.POST.get("choice", None)
  models.UserProfile.of(request.user).vote(choice)

  return HttpResponseRedirect("/thanks")
Example #6
    def run(self, args):
        models.init()
        meta.Base.metadata.create_all(bind=meta.session.bind.engine)

        # create user if needed
        query = meta.session.query(users.OdontuxUser)
        query = query.all()
        if not query:
            # create an admin user
            print('creating first user "admin"')
            print("password : please_change_password")
            print("Would be a great idea to change the admin password")
            admin_user = {
                "username": "******",
                "password": b64encode(scrypt.encrypt(os.urandom(64), "please_change_password", maxtime=0.5)),
                "role": constants.ROLE_ADMIN,
                "lastname": "admin",
                "firstname": "admin",
                "title": "M",
            }
            new_admin = users.OdontuxUser(**admin_user)
            meta.session.add(new_admin)
            meta.session.commit()

        # create gnucash payments types
        query = meta.session.query(compta.PaymentType)
        if not query.all():
            print("creating base for payments' types")
            payments_types = [
                ("Cash", "Cash"),
                ("CreditCard", "Credit card"),
                ("DebitCard", "Debit card"),
                ("Transfer", "Transfer"),
                ("Check", "Check"),
                ("Paypal", "Paypal"),
                ("Boleto", "Boleto"),
                ("Other", "Other"),
            ]
            for payment_type in payments_types:
                values = {"gnucash_name": payment_type[0], "odontux_name": payment_type[1]}
                new_payment_type = compta.PaymentType(**values)
                meta.session.add(new_payment_type)
                meta.session.commit()

        # create setting for sticker
        query = meta.session.query(users.Settings)
        query = query.all()
        if not query:
            print("creating key-value for sticker_position")
            values = {"key": "sticker_position", "value": "0"}
            new_setting = users.Settings(**values)
            meta.session.add(new_setting)
            meta.session.commit()
Example #7
def add_user():
    print(
        "For better security it is recommended that you add users using a"
        " client on a different machine from the server, this utility is only"
        " designed for adding a user to an otherwise empty system.\n")
    init(config["connection_string"])
    u = User()
    while not u.username:
        u.username = input("Enter username: ")
    # The next prompts were collapsed by string masking in the original listing;
    # the control flow is reconstructed and the attribute name u.name is a guess.
    while not u.name:
        u.name = input("Enter full name: ")
    while not u.email:
        u.email = input("Enter email: ")
    password = None
    while not password:
        password = getpass("Enter password: ")
    admin_response = None
    while admin_response not in ["y", "n"]:
        admin_response = input("Is user an admin? [y/n]: ")
    u.admin = admin_response == "y"

    print("Generating keys...", end="")
    sys.stdout.flush()

    private = RSA.generate(config["key_length"])
    public = private.publickey()

    salt = os.urandom(8)
    key = PBKDF2(password, salt).read(32)
    iv = os.urandom(16)
    cypher = AES.new(key, AES.MODE_CFB, iv)

    encrypted_private_key = cypher.encrypt(private.exportKey("DER"))

    u.public_key = base64.b64encode(public.exportKey("DER")).decode("UTF-8")
    u.encrypted_private_key = base64.b64encode(encrypted_private_key).decode(
        "UTF-8")
    u.pbkdf2_salt = base64.b64encode(salt).decode("UTF-8")
    u.aes_iv = base64.b64encode(iv).decode("UTF-8")

    auth_key = base64.b64encode(
        hashlib.pbkdf2_hmac("sha512", password.encode("UTF-8"),
                            u.username.encode("UTF-8"), 100000))

    u.auth_hash = bcrypt.hashpw(auth_key, bcrypt.gensalt()).decode("UTF-8")

    print("Done!")
    print("Adding user...", end="")
    sys.stdout.flush()
    db_session.add(u)
    db_session.commit()
    print("Done!")
Example #9
def main():
    # Init database
    models.init()
    print('')

    # Fill with Test Data
    models.test_data()
    print('')

    # Search
    search('sweater')
    print('')

    # List John's Products
    list_user_products(2)
    print('')

    # List all products tagged with 'knitted'
    list_products_per_tag(1)
    print('')

    # John now starts selling olive oil!
    product = models.Product(name='Olive Oil',
                             description='Fresh olive oil from the farm',
                             price=6.50,
                             quantity=10)
    add_product_to_catalog(2, product)
    print('')

    # List John's Products again
    list_user_products(2)
    print('')

    # Now there's only 5 steaks left in the webshop
    update_stock(5, 5)
    print('')

    # Let's make a transaction
    purchase_product(1, 2, 2)
    print('')

    # And remove a product
    remove_product(6)
    print('')

    # Search for something old fashioned
    search('grandma')
    print('')
Example #10
def hubs_view(hub_name):
    session = models.init('sqlite:////var/tmp/hubs.db')
    hub = session.query(models.Hub)\
        .filter(models.Hub.name==hub_name)\
        .first()

    return flask.render_template('hubs.html', hub=hub, session=session)
Example #11
def main():
    parser = argparse.ArgumentParser()
    group = parser.add_mutually_exclusive_group()
    group.add_argument("--run-server", action="store_true")
    group.add_argument("--initialise-db", action="store_true")
    group.add_argument("--add-user", action="store_true")
    args = parser.parse_args()

    if args.run_server:
        init(config["connection_string"])
        server.run(host="0.0.0.0", debug=True)
    elif args.initialise_db:
        initialise_db()
    elif args.add_user:
        add_user()
    else:
        parser.print_help()
Example #13
def static_web_generation(config):
    """ Generate the whole static web pages """
    engine = models.init(config)
    session = models.get_session(engine)

    # Ordered by ascending aos_time (oldest first)
    passes = session().query(models.Passes).order_by(models.Passes.aos_time.asc())

    print passes
Example #14
def session(hosts=["127.0.0.1"]):
    """
    session returns the Cassandra connection.
    """
    global sess, film_by_episode_stmt, films_stmt, people_stmt, planets_stmt, species_stmt, starships_stmt, vehicles_stmt
    if sess is None:
        # TODO (NF 2018-02-13): Use connection pool.
        lbp = policies.RoundRobinPolicy()
        sess = cluster.Cluster(hosts, load_balancing_policy=lbp).connect()
        models.init(sess)
        film_by_episode_stmt = sess.prepare(SELECT_FILM_BY_EPISODE)
        films_stmt = sess.prepare(SELECT_FILMS)
        people_stmt = sess.prepare(SELECT_PEOPLE)
        planets_stmt = sess.prepare(SELECT_PLANETS)
        species_stmt = sess.prepare(SELECT_SPECIES)
        starships_stmt = sess.prepare(SELECT_STARSHIPS)
        vehicles_stmt = sess.prepare(SELECT_VEHICLES)

    return sess
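A hypothetical caller inside the same module would reuse the connection and the statements session() prepares (SELECT_FILMS etc. are module-level CQL strings in the original):

sess = session(["127.0.0.1"])    # first call connects and prepares all statements
rows = sess.execute(films_stmt)  # later executions reuse the prepared plan
for row in rows:
    print(row)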
Example #15
def tower_loss(scope, images, labels, model_name, num_labels):
    # Build inference Graph.
    logits = models.init(model_name, images, num_labels, is_training=True)
    # Build the portion of the Graph calculating the losses. Note that we will
    # assemble the total_loss using a custom function below.
    _ = models.get_mse_loss(logits, labels)
    # Assemble all of the losses for the current tower only.
    losses = tf.get_collection(tf.GraphKeys.LOSSES, scope)
    # Calculate the total loss for the current tower.
    total_loss = tf.add_n(losses, name='total_loss')
    return total_loss
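tower_loss() is the per-GPU half of the classic TensorFlow 1.x multi-tower pattern; a sketch of how a training loop might call it (num_gpus, the placeholders, and the model name are illustrative assumptions):

import tensorflow as tf  # TF 1.x API

num_gpus, num_labels = 2, 10
images = tf.placeholder(tf.float32, [None, 64, 64, 3])
labels = tf.placeholder(tf.float32, [None, num_labels])
tower_losses = []
for i in range(num_gpus):
    with tf.device('/gpu:%d' % i):
        with tf.name_scope('tower_%d' % i) as scope:
            tower_losses.append(tower_loss(scope, images, labels, 'my_model', num_labels))
            tf.get_variable_scope().reuse_variables()  # share weights across towers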
Example #16
def parse():
    conn, cursor = models.init()
    global mydata
    req = request.args
    if "company" in req:
        company_code = req["company"]
        mydata = models.execute(conn, cursor, ("company", company_code))
    elif "person" in req:
        name = req["person"]
        mydata = models.execute(conn, cursor, ("person", name))
    return render_template('test.html')
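As written, parse() hands its results to the template through the module-level mydata global. A sketch of the more conventional Flask style, assuming the same models helpers, passes the rows as template context instead:

def parse():
    conn, cursor = models.init()
    req = request.args
    mydata = []
    if "company" in req:
        mydata = models.execute(conn, cursor, ("company", req["company"]))
    elif "person" in req:
        mydata = models.execute(conn, cursor, ("person", req["person"]))
    return render_template('test.html', mydata=mydata)  # template reads mydata directly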
Example #17
def setup(config_file):
    global CONFIG_FILE
    CONFIG_FILE = config_file
    models.init(get_setting('databasefile', os.path.join(THIS_DIR, 'transfers.db')))

    # Configure logging
    default_logfile = os.path.join(THIS_DIR, 'automate-transfer.log')
    CONFIG = {
        'version': 1,
        'disable_existing_loggers': False,
        'formatters': {
            'default': {
                'format': '%(levelname)-8s  %(asctime)s  %(filename)s:%(lineno)-4s %(message)s',
                'datefmt': '%Y-%m-%d %H:%M:%S',
            },
        },
        'handlers': {
            'console': {
                'class': 'logging.StreamHandler',
                'formatter': 'default',
            },
            'file': {
                'class': 'logging.handlers.RotatingFileHandler',
                'formatter': 'default',
                'filename': get_setting('logfile', default_logfile),
                'backupCount': 2,
                'maxBytes': 10 * 1024,
            },
        },
        'loggers': {
            'transfer': {
                'level': 'INFO',  # One of INFO, DEBUG, WARNING, ERROR, CRITICAL
                'handlers': ['console', 'file'],
            },
        },
    }
    logging.config.dictConfig(CONFIG)
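Once setup() has run, any module can log through the configured 'transfer' logger; a minimal sketch:

import logging
logger = logging.getLogger('transfer')  # handlers installed by dictConfig above
logger.info('Transfer started')         # goes to both console and the rotating file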
Example #18
def main():
    if 'test' not in settings.KCING_DB:
        logger.error(
            'Database for testing should contain the word "test" in it')
        logger.error(
            'Please specify a testing database passing KCING_DB env var when running tests'
        )
        return -1

    # Deletes current db to make sure we're working with a fresh one
    try:
        os.unlink(settings.KCING_DB)
    except OSError:
        pass

    os.mknod(settings.KCING_DB)
    init()
    create_tables()

    # unittest.main() calls sys.exit() by default, which would skip end();
    # exit=False keeps control here so teardown can run.
    program = unittest.main(exit=False)

    end()

    return 0 if program.result.wasSuccessful() else 1
Example #19
def add_db_record(config, sat_name, automate_started, aos_time, los_time, max_elev, record_time):
    """ Add record of the pass to database """
    engine = models.init(config)
    session_maker = models.get_session(engine)
    session = session_maker()

    sat_pass = models.Passes()
    sat_pass.sat_name = sat_name
    sat_pass.automate_started = datetime.datetime.utcfromtimestamp(automate_started)
    sat_pass.aos_time = datetime.datetime.utcfromtimestamp(aos_time)
    sat_pass.los_time = datetime.datetime.utcfromtimestamp(los_time)
    sat_pass.max_elev = max_elev
    sat_pass.record_time = record_time
    sat_pass.sat_type = sat_type(sat_name)

    session.add(sat_pass)
    session.commit()
Example #20
 def __init_model(self):
     with tf.Graph().as_default():
         self.placeholder = tf.placeholder(dtype=tf.float32,
                                           shape=[
                                               None,
                                               self._arg_dict['img_size'],
                                               self._arg_dict['img_size'], 3
                                           ])
         self.logits = models.init(self._arg_dict['model'],
                                   self.placeholder,
                                   self._arg_dict['landmark_type'] * 2,
                                   is_training=False)
         saver = tf.train.Saver()
         with tf.device('/gpu:0'):
             self.sess = tf.Session(config=tf.ConfigProto(
                 gpu_options=tf.GPUOptions(allow_growth=True)))
             ckpt_path = self._arg_dict['model_path']
             if tf.train.checkpoint_exists(ckpt_path):
                 saver.restore(self.sess, ckpt_path)
             else:
                 raise Exception("model_path inexistence")
Example #21
train_layouts, train_objects, train_rewards, train_terminal, \
        train_instructions, train_indices, train_values, train_goals = data.to_tensor(train_data, text_vocab)

test_layouts, test_objects, test_rewards, test_terminal, \
    test_instructions, test_indices, test_values, test_goals = data.to_tensor(test_data, text_vocab)

print '<Main> Training: (', train_layouts.size(), 'x', train_objects.size(), 'x', train_indices.size(), ') -->', train_values.size()
print '<Main> test     : (', test_layouts.size(), 'x', test_objects.size(), 'x', test_indices.size(), ') -->', test_values.size()


#################################
############ Training ###########
#################################

print '\n<Main> Initializing model: {}'.format(args.model)
model = models.init(args, layout_vocab_size, object_vocab_size, text_vocab_size)

train_inputs = (train_layouts, train_objects, train_indices)
test_inputs = (test_layouts, test_objects, test_indices)

print '<Main> Training model'
trainer = pipeline.Trainer(model, args.lr, args.batch_size)
trainer.train(train_inputs, train_values, test_inputs, test_values, iters=args.iters)


#################################
######## Save predictions #######
#################################

## make logging directories
pickle_path = os.path.join(args.save_path, 'pickle')
Example #22
File: soil.py Project: DavidGMUPM/soil
from models import *
from nxsim import NetworkSimulation
import numpy
from matplotlib import pyplot as plt
import networkx as nx
import settings
import models
import math
import json

settings.init()  # Loads all the data from settings
models.init()  # Loads the models and network variables

####################
# Network creation #
####################

if settings.network_type == 0:
    G = nx.complete_graph(settings.number_of_nodes)
if settings.network_type == 1:
    G = nx.barabasi_albert_graph(settings.number_of_nodes, 10)
if settings.network_type == 2:
    G = nx.margulis_gabber_galil_graph(settings.number_of_nodes, None)
# More types of networks can be added here

##############
# Simulation #
##############

sim = NetworkSimulation(topology=G,
                        states=init_states,
Example #23
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app

import settings
import rest
import models
import views
import paypal

from rest.views import PageHandler

import test_streetcode
import demo

models.init()

paths = [
    ('/', views.RootHandler),
]

paths.extend(rest.get_paths())

paths.extend([
    ('/(\w+)', PageHandler.using('mobile_profile.html')),
    ('/client/(\w+)/(\d+)', views.ClientHandler),
    ('/donations/(\w+)', PageHandler.using('mobile_donation.html')),
    ('/paypal/ipn', paypal.IPNHandler),  # paypal ipn handler
    ('/paypal/pdt', paypal.PDTHandler),  # paypal pdt handler
])

Example #24
File: update.py Project: larsks/dropblog
def main():
    opts = parse_args()
    models.init(opts.dburi, echo=opts.debug)
    db = Session()

    dbx = dropbox.client.DropboxClient(utils.dropbox_session(db))

    for owner in db.query(Identity):
        blogs = {}
        cursor = owner.dropbox_cursor
        print 'Found cursor:', cursor
        dbx.session.set_token(owner.dropbox_key, owner.dropbox_secret)

        account_info = dbx.account_info()
        print 'Processing blogs for %s.' % account_info['display_name']

        for blog in owner.blogs:
            blogs[blog.name] = blog
        
        delta = dbx.delta(cursor)

        if delta['reset']:
            print 'RESET'
            for post in db.query(Post).join(Post.blog).filter(Blog.owner_id ==
                    owner.id):
                db.delete(post)
            db.commit()

        for entry in delta['entries']:
            if entry[1] and entry[1]['is_dir']:
                continue

            mo = re.match('/sites/(?P<blog>[^/]*)/posts/(?P<post>[^/]*\.md$)', entry[0])
            if not mo or mo.group('blog') not in blogs:
                continue

            print '->', entry[0]

            if entry[1] is None:
                db.query(Post).filter(Post.id == entry[0]).delete()
            else:
                doc = mailbox.Message(dbx.get_file(entry[0]))

                p = db.query(Post).get(entry[0])

                if not p:
                    p = Post(id=entry[0],
                        date=time.strftime('%Y-%m-%d',
                            time.localtime()))
                    db.add(p)

                blogs[mo.group('blog')].posts.append(p)

                p.title = doc.get('title',
                        os.path.splitext(os.path.basename(entry[0]))[0]).encode('utf-8')
                p.slug = slugify(p.title)
                p.published = doc.get('published', 'True') == 'True'
                p.date = doc.get('date', p.date)

                html = utils.filter_markdown(doc.get_payload())

                if not p.content:
                    p.content = Content()

                p.content.html = html

        owner.dropbox_cursor = delta['cursor']
        db.commit()
Example #25
def train(prefix, **arg_dict):
    img_size = arg_dict['img_size']
    gpu_num = len(arg_dict["gpu_device"].split(','))
    batch_size = arg_dict["batch_size"]
    common_dict = {"global_step": 1}
    print ("batch_size = %d for gpu_num = %d" % (batch_size, gpu_num))
    if arg_dict["parallel_mode"] == "ModelParallel":
        print ("Working on model parallel.")
        if gpu_num <= 1:
            raise Exception("Model parallel only support more than 2 gpu number")
    elif arg_dict["parallel_mode"] == "DataParallel":
        print ("Working on data parallel")
    else:
        raise Exception("Unsupport parallel mode. see --help")
    # Create tf_summary writer.
    try:
        from tensorboardX import SummaryWriter
        summary_dir = os.path.join(prefix, "tf_summary")
        if os.path.exists(summary_dir):
            print ("Delete old summary in first.")
            os.system("rm -rf {}".format(summary_dir))
        common_dict["tensorboard_writer"] = SummaryWriter(summary_dir)
        print ("Enable tensorboard summary.")
        print ("Please using 'python -m tensorboard.main --logdir={}'".format(summary_dir))
    except Exception as ex:
        common_dict["tensorboard_writer"] = None
        print ("Disable tensorboard summary. please install tensorboardX in first.")
        print ("Easy to install by 'pip install tensorboardX --user'")
    # batch generator
    _batch_reader = BatchReader(**arg_dict)
    _batch_generator = _batch_reader.batch_generator()
    # net
    model_params = json.loads(arg_dict["model_params"])
    model_params["image_size"] = arg_dict["img_size"]
    model_params["feature_dim"] = arg_dict["feature_dim"]
    model_params["class_num"] = arg_dict["label_num"]
    net =  models.init(arg_dict["model"], gpu_num=gpu_num, model_params=model_params,
                       parallel_mode=arg_dict["parallel_mode"], common_dict=common_dict)
    if arg_dict["parallel_mode"] == "DataParallel":
        net = nn.DataParallel(net)
        net.cuda()
    # print (net)
    if arg_dict["restore_ckpt"]:
        print ("Resotre ckpt from {}".format(arg_dict["restore_ckpt"]))
        net.load_state_dict(torch.load(arg_dict["restore_ckpt"]))
    # optimizer
    optimizer = optim.SGD(net.parameters(), lr=arg_dict['learning_rate'],
                          momentum=0.9, weight_decay=5e-4)
    lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=20000, gamma=0.95)
    # start loop
    print ("Start to training...")
    start_time = time.time()
    display = 100
    loss_list = []
    while not _batch_reader.should_stop():
        #  prepare data
        batch_st = time.time()
        batch = _batch_generator.next()
        datas = batch[0].cuda()
        labels = batch[1].cuda()
        batch_et = time.time()
        #  forward and backward
        loss = net(datas, labels)
        loss = loss.mean()
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        lossd = loss.data[0]
        #  display
        loss_list.append(lossd)
        if common_dict["global_step"] % display == 0:
            end_time = time.time()
            cost_time, start_time = end_time - start_time, end_time
            sample_per_sec = int(display * batch_size / cost_time)
            sec_per_step = cost_time / float(display)
            loss_display = np.mean(loss_list)
            lr = optimizer.param_groups[0]['lr']
            print ('[%s] epochs: %d, step: %d, lr: %.5f, loss: %.5f, '\
                   'sample/s: %d, sec/step: %.3f, batch time: %.3fs' % (
                   datetime.datetime.now().strftime("%Y%m%d_%H%M%S"), 
                   _batch_reader.get_epoch(), common_dict["global_step"], lr, loss_display,
                   sample_per_sec, sec_per_step, batch_et - batch_st))
            loss_list = []
            if common_dict["tensorboard_writer"] is not None:
                common_dict["tensorboard_writer"].add_scalar("loss", loss_display,
                                                             common_dict["global_step"])
                common_dict["tensorboard_writer"].add_scalar("sample_per_sec", sample_per_sec,
                                                             common_dict["global_step"])
                common_dict["tensorboard_writer"].add_scalar("lr", lr,
                                                             common_dict["global_step"])
        if common_dict["global_step"] % 10000 == 0:
            # save checkpoint
            checkpoint_path = os.path.join(prefix, 'model.ckpt')
            torch.save(net.state_dict(), checkpoint_path)
            print ("save checkpoint to %s" % checkpoint_path)
        lr_scheduler.step()
        common_dict["global_step"] += 1
Example #26
def train(prefix, **arg_dict):
    batch_size = arg_dict['batch_size']
    num_labels = arg_dict['landmark_type'] * 2
    img_size = arg_dict['img_size']
    # batch generator
    _batch_reader = BatchReader(**arg_dict)
    _batch_generator = _batch_reader.batch_generator()

    with tf.Graph().as_default():
        images = tf.placeholder(tf.float32,
                                shape=[batch_size, img_size, img_size, 3])
        point_labels = tf.placeholder(tf.float32,
                                      shape=[batch_size, num_labels])

        logits = models.init(arg_dict['model'],
                             images,
                             num_labels,
                             is_training=True)

        loss = models.get_l2_loss(logits, point_labels, batch_size)

        # Create a variable to track the global step.
        global_step = tf.Variable(0, name='global_step', trainable=False)
        learning_rate = tf.train.exponential_decay(arg_dict['learning_rate'],
                                                   global_step,
                                                   30000,
                                                   0.5,
                                                   staircase=True)
        # Use the optimizer to apply the gradients that minimize the loss
        # (and also increment the global step counter) as a single training step.
        optimizer = tf.train.AdamOptimizer(learning_rate)
        train_op = optimizer.minimize(loss, global_step=global_step)

        sess = tf.Session(config=tf.ConfigProto(gpu_options=tf.GPUOptions(
            allow_growth=True)))
        init = tf.global_variables_initializer()
        sess.run(init)
        saver = tf.train.Saver(tf.global_variables())

        if arg_dict['restore_ckpt']:
            variables_to_restore = tf.get_collection(
                tf.GraphKeys.TRAINABLE_VARIABLES)
            restorer = tf.train.Saver(variables_to_restore)
            restorer.restore(sess, arg_dict['restore_ckpt'])
            print('Resume-trained model restored from: %s' %
                  arg_dict['restore_ckpt'])

        tf.train.write_graph(sess.graph.as_graph_def(), '.',
                             os.path.join(prefix, 'graph_struct.txt'))

        print("Start to training...")
        start_time = time.time()
        while not _batch_reader.should_stop():
            with tf.device('/gpu:0'):
                batch = _batch_generator.next()
                _, ploss, step, lr = sess.run(
                    [train_op, loss, global_step, learning_rate],
                    feed_dict={
                        images: batch[0],
                        point_labels: batch[1]
                    })
                if step % 10 == 0:
                    end_time = time.time()
                    cost_time, start_time = end_time - start_time, end_time
                    sample_per_sec = int(10 * batch_size / cost_time)
                    sec_per_step = cost_time / 10.0
                    print(
                        '[%s] epochs: %d, step: %d, lr: %f, landmark_loss: %.4f, sample/s: %d, sec/step: %.3f'
                        % (datetime.datetime.now().strftime("%Y%m%d_%H%M%S"),
                           _batch_reader.get_epoch(), step, lr, ploss,
                           sample_per_sec, sec_per_step))
            if step % 1024 == 0:
                checkpoint_path = os.path.join(prefix, 'model.ckpt')
                saver.save(sess, checkpoint_path)
                print('Saved checkpoint to %s' % checkpoint_path)
        checkpoint_path = os.path.join(prefix, 'model.ckpt')
        saver.save(sess, checkpoint_path)
        print('\nReview training parameter:\n%s\n' % (str(arg_dict)))
        print('Saved checkpoint to %s' % checkpoint_path)
        print('Bye Bye!')
Example #27
def generate_static_web(config, sat_name, automate_started,
                        aos_time, los_time, max_elev, record_time):
    """ Generate static web pages """
    if not config.getboolean("PROCESSING", "staticWeb"):
        return

    engine = models.init(config)
    session_maker = models.get_session(engine)
    session = session_maker()

    cur_path = os.path.dirname(os.path.abspath(__file__))
    template_env = Environment(
        autoescape=False,
        loader=FileSystemLoader(os.path.join(cur_path, 'templates')),
        trim_blocks=False)
    template_env.filters['datetime'] = format_datetime

    def render_template(template, context):
        """ Render the template """
        return template_env.get_template(template).render(context)

    emerge_time_utc = time.strftime('%Y-%m-%dT%H:%M:%S', time.gmtime(aos_time))

    if not os.path.exists(config.get("DIRS", "staticWeb")):
        print cfg.logLineStart + \
            "PATH for static web doesn't exist, can't generate web pages" + \
            cfg.logLineEnd
        return

    # Generate the web page of the pass itself

    # time is UTC
    dst_single_pass = os.path.join(
        config.get("DIRS", "staticWeb"), "{}.html".format(emerge_time_utc.replace(":", "-")))

    # timestamp - UTC
    img_tstamp = datetime.datetime.utcfromtimestamp(aos_time).strftime('%Y%m%d-%H%M')
    with open(dst_single_pass, 'w') as single_pass_file:
        ctx = {
            'sat_name': sat_name,
            'aos_time': aos_time,  # localtime
            'los_time': los_time,  # localtime
            'automate_started': automate_started,  # UTC (from time.time())
            'max_el': max_elev,
            'record_time': record_time,
            'img_tstamp': img_tstamp,
            'sat_type': sat_type(sat_name),
        }

        if config.getboolean('PROCESSING', 'wxEnhCreate'):
            ctx['enhancements'] = []
            for enhancement in config.getlist('PROCESSING', 'wxEnhList'):
                filename = "{}-{}-map.jpg".format(img_tstamp, enhancement)
                ctx['enhancements'].append({
                    'name': enhancement,
                    'img_path': filename,
                    'img_full_path': os.path.join("/img_noaa", sat_name, filename),
                    'log': "{}.txt".format(os.path.join("/img_noaa", sat_name, filename)),
                })

        if config.getboolean('PROCESSING', 'createSpectro'):
            filename = "{}-{}.png".format(sat_name.replace(" ", ""), str(aos_time))
            ctx['spectro'] = {
                'filename': os.path.join("/spectro_noaa", filename)
            }

        html = render_template(config.get('STATIC_WEB', 'single_pass'), ctx)
        single_pass_file.write(html)
        print cfg.logLineStart + "Wrote web page for single NOAA pass" + cfg.logLineEnd

    # Generate the home page of the passes
    # The CSV is filled even if static web generation is not activated.
    # Cycle over the records, regenerating the home page plus every page, split by config.passes_per_page

    # Latest first
    passes = session.query(models.Passes).order_by(models.Passes.aos_time.desc())

    # (re)generate the home page
    passes_per_pages = config.getint('STATIC_WEB', 'passes_per_page')

    page = 0
    pages = int(math.ceil(float(passes.count()) / float(passes_per_pages)))
    home_passes = passes[0:passes_per_pages]

    index_page = os.path.join(
        config.get("DIRS", "staticWeb"), "index.html")
    with open(index_page, 'w') as index_page_file:
        ctx = {
            'passes': home_passes,
            'passes_per_pages': passes_per_pages,
            'start': 0,
            'total': passes.count(),
            'page': page,
            'pages': pages,
            'pages_list': range(0, pages),
        }
        html = render_template(config.get('STATIC_WEB', 'index_passes'), ctx)
        index_page_file.write(html)
        page = 1  # index created, increment page
        #print cfg.logLineStart + \
        #      "Wrote web page for index passes, page 0 0-{}".format(passes_per_pages) + \
        #      cfg.logLineEnd

    if passes.count() > passes_per_pages:
        # We have more pages to show
        for _ in range(1, pages):
            start_passes = (page * passes_per_pages)
            page_passes = passes[start_passes:start_passes + passes_per_pages]
            passes_page = os.path.join(config.get("DIRS", "staticWeb"),
                                       "index_{}.html".format(page))

            with open(passes_page, 'w') as passes_page_file:
                ctx = {
                    'passes': page_passes,
                    'passes_per_pages': passes_per_pages,
                    'start': start_passes,
                    'total': passes.count(),
                    'page': page,
                    'pages': pages,
                    'pages_list': range(0, pages),
                }
                html = render_template(config.get('STATIC_WEB', 'index_passes'), ctx)
                passes_page_file.write(html)
                #print cfg.logLineStart + "Wrote web page for index passes, page {} {}-{}".format(
                #    page, start_passes, start_passes + passes_per_pages
                #) + cfg.logLineEnd
            page = page + 1  # page created, increment

    print cfg.logLineStart + "Finished web page processing" + cfg.logLineEnd
Example #28
opt.env = opt.model

# visdom
vis = Visualizer(opt.env)

# vis log output
vis.log('user config:')
for k, v in opt.__dict__.items():
    if not k.startswith('__'):
        vis.log('{} {}'.format(k, getattr(opt, k)))

# load data
# use torchtext to load
train_iter, test_iter = load_data(opt)

model = models.init(opt)

print(type(model))

# cuda
if opt.use_cuda:
    model.cuda()

# start training
model.train()
# set optimizer
optim = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()),
                         lr=opt.learning_rate)
optim.zero_grad()
# use cross_entropy loss for classification
criterion = F.cross_entropy
Example #29
        landmark_block = nn.HybridSequential()
        with landmark_block.name_scope():
            landmark_block.add(nn.Dense(512, flatten=True),
                               nn.Dense(self.num_label, activation="sigmoid"))
        model.add(landmark_block)
        # angle_block
        if self.kwargs.get("train_angle", False):
            angle_block = nn.HybridSequential()
            with angle_block.name_scope():
                angle_block.add(nn.Dense(512, flatten=True),
                                nn.Dense(3, activation="sigmoid"))
            model.add(angle_block)
        return model


# init
def init(model, num_label, **kwargs):
    func = getattr(sys.modules["models"], model)
    return func(num_label, **kwargs)


if __name__ == "__main__":
    import models
    net = models.init("fanet8ss_inference", num_label=72 * 2)
    net.initialize()
    print net
    from mxnet import nd
    test_data = nd.random.uniform(shape=(1, 3, 128, 128))
    landmark, angle = net(test_data)
    print landmark.shape, angle.shape
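The init() above resolves the model class by name with getattr; a minimal standalone sketch of the same dispatch pattern:

import sys

def fanet8ss_inference(num_label, **kwargs):
    return "net with %d labels" % num_label  # stand-in for the real network class

def init(model, num_label, **kwargs):
    func = getattr(sys.modules[__name__], model)  # look the constructor up by name
    return func(num_label, **kwargs)

print(init("fanet8ss_inference", num_label=72 * 2))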
Example #30
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# ..
import logging, argparse

# ...
import yask, config, urls, models

app = yask.App()
# ...
config.init(app)
app.init_logger()
urls.init(app)
models.init(app)

# ...
if __name__ == "__main__":
    # ...
    parser = argparse.ArgumentParser(description='Setup defaults')
    # ..
    parser.add_argument('--host',
                        dest='host',
                        default=app.config.HOST,
                        help='Default hostname')
    # ...
    parser.add_argument('--port',
                        dest='port',
                        type=int,
                        default=app.config.PORT,
                        help='Default port')
Example #31
def train(prefix, **arg_dict):
    num_labels = arg_dict['landmark_type'] * 2
    img_size = arg_dict['img_size']
    train_angle = arg_dict['train_angle']
    gpu_num = len(arg_dict["gpu_device"].split(','))
    batch_size = arg_dict['batch_size'] * gpu_num
    arg_dict['batch_size'] = batch_size
    print("real batch_size = %d for gpu_num = %d" % (batch_size, gpu_num))
    # batch generator
    _batch_reader = BatchReader(**arg_dict)
    _batch_generator = _batch_reader.batch_generator()
    # net
    ctx = [mx.gpu(i) for i in range(gpu_num)]
    net = models.init(num_label=num_labels, **arg_dict)
    if arg_dict["restore_ckpt"]:
        print "resotre checkpoint from %s" % (arg_dict["restore_ckpt"])
        net.load_params(arg_dict['restore_ckpt'], ctx=ctx)
    else:
        net.initialize(init=mx.init.Xavier(), ctx=ctx)
    print net
    # loss
    losses_func = []
    if train_angle:
        losses_func.append(gluon.loss.L2Loss(weight=0.5))  # landmark
        losses_func.append(gluon.loss.L2Loss(weight=0.5))  # angle
    else:
        losses_func.append(gluon.loss.L2Loss())  # landmark
    # trainer
    trainer = gluon.Trainer(net.collect_params(), "adam",
                            {"learning_rate": arg_dict['learning_rate']})
    # start loop
    print("Start to training...")
    start_time = time.time()
    step = 0
    display = 10
    loss_list = []
    while not _batch_reader.should_stop():
        batch = _batch_generator.next()
        image = nd.array(batch[0])
        image = nd.transpose(image.astype('float32'),
                             (0, 3, 1, 2)) / 127.5 - 1.0
        image_list = gluon.utils.split_and_load(image, ctx)
        landmark = nd.array(batch[1])
        landmark_list = gluon.utils.split_and_load(landmark, ctx)
        if train_angle:
            angle = nd.array(batch[2])
            angle_list = gluon.utils.split_and_load(angle, ctx)
        with autograd.record():
            losses = []
            if train_angle:
                for _i, _l, _a in zip(image_list, landmark_list, angle_list):
                    predicts = net(_i)
                    landmark_loss = losses_func[0](predicts[0], _l)
                    angle_loss = losses_func[1](predicts[1], _a)
                    losses.append(landmark_loss + angle_loss)
            else:
                for _i, _l in zip(image_list, landmark_list):
                    predicts = net(_i)
                    landmark_loss = losses_func[0](predicts, _l)
                    losses.append(landmark_loss)
        for loss in losses:
            loss.backward()
        trainer.step(batch_size)
        loss_list.append(np.mean([nd.mean(l).asscalar() for l in losses]))
        nd.waitall()
        if step % display == 0:
            end_time = time.time()
            cost_time, start_time = end_time - start_time, end_time
            sample_per_sec = int(display * batch_size / cost_time)
            sec_per_step = cost_time / float(display)
            loss_display = "[landmark: %.5f]" % (np.mean(loss_list))
            print ('[%s] epochs: %d, step: %d, lr: %.5f, loss: %s, '\
                   'sample/s: %d, sec/step: %.3f' % (
                   datetime.datetime.now().strftime("%Y%m%d_%H%M%S"),
                   _batch_reader.get_epoch(), step, trainer.learning_rate, loss_display,
                   sample_per_sec, sec_per_step))
            loss_list = []
        if step % 1024 == 0:
            # change lr
            trainer.set_learning_rate(trainer.learning_rate * 0.95)
            # save checkpoint
            checkpoint_path = os.path.join(prefix, 'model.params')
            net.save_params(checkpoint_path)
            print("save checkpoint to %s" % checkpoint_path)
        step += 1
Example #32
from flask import Flask
import models

app = Flask('supportsomething')
app.config.from_object('supportsomething.settings')
app.config.from_envvar('SETTINGS', silent=True)

# Init models
models.init(app)

# Import modules
import views
Example #33
def initialise_db():
    print("Initialising database...", end="")
    sys.stdout.flush()
    init(config["connection_string"])
    create_all()
    print("Done!")
Example #34
 def setUp(self):
     self.session = cql.session()
     models.init(self.session)
Example #35
import flask, json
from flask import Flask, g, render_template, request, session, redirect, url_for
import itertools
import re
import os

import models

app = Flask(__name__)
app.secret_key = 'A0Zr98j/3yX R~XHH!j4mNLWX/,?RT'
db = models.init(os.environ.get("DATABASE_URL", "sqlite:///development.sqlite3"))

@app.before_request
def before_request():
    g.db = db()

@app.teardown_request
def teardown_request(exception=None):
    if hasattr(g, 'db'):
        g.db.close()

@app.route("/courses")
def courses():
    match_with = request.args.get('term', None)
    if not match_with:
        return json.dumps([])
    matches = models.Course.search_by_keywords(match_with.split(" "))
    course_data = []
    for match in matches:
        course_data.append({"label": match.name, "value": match.id})
Example #36
import json

import tornado.web
import tornado.gen
import tornado.auth
import tornado.websocket
import motor.motor_tornado
from tornado.web import HTTPError
from tornado.gen import coroutine

from models import init, User, Dialog

motor_client = motor.motor_tornado.MotorClient('localhost', 27017)
db = motor_client['chatdb']
init(db)


class BaseHandler(tornado.web.RequestHandler):
    def get_current_user(self):
        return self.get_cookie('name')


class MainHandler(BaseHandler):
    def get(self):
        # if not self.get_current_user():
        #     self.redirect('/loginpage')
        f = open('static/chats/chats.html')
        s = f.read()
        f.close()
        self.write(s)
Example #37
File: app.py Project: Catamondium/scratch
from flask import Flask, jsonify, url_for, redirect
from flask_sqlalchemy import SQLAlchemy

from os import environ

import models
from tasks_api import tasks

app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:////tmp/test.db'
db = SQLAlchemy(app)

# For blueprint use
app.config['models'] = models.init(db)

try:
    assert environ['FLASK_ENV'] == 'development'
    db.create_all()
    from werkzeug.debug import DebuggedApplication
    app.config.from_object(__name__)
    app.wsgi_app = DebuggedApplication(app.wsgi_app, evalex=True)
except KeyError:
    pass
except AssertionError:
    pass

app.register_blueprint(tasks, url_prefix='/tasks')


@app.route('/')
def hello_json():
Example #38
    import smtplib
    # from flask_mail import Mail
    try:
        host = "smtp.gmail.com"
        port = 587
        username = "******"
        password = "******"
        to = email
        # try:
        email_conn = smtplib.SMTP(host, port)
        email_conn.ehlo()
        email_conn.starttls()
        email_conn.login(username, password)
        import random
        message = """\
Subject: Reset Password OTP

This message is sent from ShelterApp Volunteer Portal for Resetting your password. Your Otp is """ + str(
            otp) + """\n\nThanks,\nShelterApp Team\[email protected]"""
        email_conn.sendmail(username, to, message)
        return True
    except:
        return False


if __name__ == "__main__":
    # mailstuff()
    email = init()
    app.secret_key = "12345678"
    app.run(host='0.0.0.0', debug=True, port=80)
Example #39
import flask, json
from flask import Flask, g, render_template, request, session, redirect, url_for
import itertools
import re
import os

import models

app = Flask(__name__)
app.secret_key = 'A0Zr98j/3yX R~XHH!j4mNLWX/,?RT'
db = models.init(
    os.environ.get("DATABASE_URL", "sqlite:///development.sqlite3"))


@app.before_request
def before_request():
    g.db = db()


@app.teardown_request
def teardown_request(exception=None):
    if hasattr(g, 'db'):
        g.db.close()


@app.route("/courses")
def courses():
    match_with = request.args.get('term', None)
    if not match_with:
        return json.dumps([])
    matches = models.Course.search_by_keywords(match_with.split(" "))
Example #40
app = Flask(__name__)

app.config['SECRET_KEY'] = '123456790'
app.config['DATABASE_FILE'] = 's0inv.sqlite'
app.config[
    'SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + app.config['DATABASE_FILE']
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
#app.config['SQLALCHEMY_ECHO'] = True
db = SQLAlchemy(app)

admin = Admin(app,
              name='s0inv',
              template_mode='bootstrap3',
              url='/' + NAMESPACE)
security.init(app, db, admin)
models.init(app, db, admin)


def build_sample_db():
    db.drop_all()
    db.create_all()


app_dir = os.path.realpath(os.path.dirname(__file__))
database_path = os.path.join(app_dir, app.config['DATABASE_FILE'])
if not os.path.exists(database_path):
    build_sample_db()


@app.route("/")
def app_index():
Example #41
 def get_session(self):
     """ Return a database session according to config values
     """
     return models.init(uri=self['dbname'], host=self['host'], user=self.user, passwd=self.passwd_hash, dbtype=self['dbtype'], path=self.args.dname)
Example #42
File: app.py Project: agungsptr/side-api
app.register_blueprint(res.inf_sarana_api, url_prefix=url_prefix)
app.register_blueprint(res.inf_dusun_api, url_prefix=url_prefix)
app.register_blueprint(res.inf_administrasi_api, url_prefix=url_prefix)
app.register_blueprint(res.inf_penduduk_api, url_prefix=url_prefix)
app.register_blueprint(res.inf_perangkat_api, url_prefix=url_prefix)
app.register_blueprint(res.inf_umum_api, url_prefix=url_prefix)
app.register_blueprint(res.geo_api, url_prefix=url_prefix)

# Set up the token blacklist for logging out
jwt = JWTManager(app)
blacklist = set()


@jwt.token_in_blacklist_loader
def check_if_token_in_blacklist(decrypted_token):
    jti = decrypted_token['jti']
    return jti in blacklist


@app.route('{}/user/logout'.format(url_prefix))
@jwt_required
def logout():
    jti = get_raw_jwt()['jti']
    blacklist.add(jti)
    return 'logged out'


if __name__ == '__main__':
    md.init()
    app.run()
Example #43
File: main.py Project: Jack1007/xmpptalk
  )
  botsettings = {
    'presence': settings['initial_presence'],
  }
  settings.update(config.settings)
  settings = XMPPSettings(settings)

  if config.trace:
    logging.info('enabling trace')
    for logger in ('pyxmpp2.IN', 'pyxmpp2.OUT'):
      logger = logging.getLogger(logger)
      logger.setLevel(logging.DEBUG)

  for logger in (
    'pyxmpp2.mainloop.base', 'pyxmpp2.expdict',
    'pyxmpp2.mainloop.poll', 'pyxmpp2.mainloop.events',
    'pyxmpp2.transport', 'pyxmpp2.mainloop.events',
  ):
      logger = logging.getLogger(logger)
      logger.setLevel(max((logging.INFO, config.logging_level)))

  if config.logging_level > logging.DEBUG:
    restart_if_failed(runit, 3, args=(settings, botsettings))
  else:
    runit(settings, botsettings)

if __name__ == '__main__':
  setup_logging()
  models.init()
  main()
Example #44
from flask import Flask, jsonify, request

from models import User, init, session
import datetime

app = Flask(__name__)

init(app)


@app.route('/user/list', methods = ['GET'])
def user_list():
  return jsonify({'users': [u.json() for u in User.get_all()]})


@app.route('/user/save', methods = ['POST'])
def user_save():

  if 'account_value' in request.json:
    request.json['account_value'] = float(request.json['account_value'] or 0)

  if 'birthdate' in request.json:
    request.json['birthdate'] = datetime.datetime.strptime(request.json['birthdate'], '%Y-%m-%d')

  data = {}
  fields = ['account_value', 'name', 'state', 'address', 'birthdate']
  for field in fields:
    if field in request.json:
      data[field] = request.json[field]

  user = None