Example no. 1
 def __init__(self, url):
     self.url = url
     configs = config()
     self.headers = configs.HEADERS
     print(type(self.headers))
     self.sig_data = configs.SIG_DATA
     self.salt = config().SALT
     self.token_salt = configs.TOKEN_SALT
Example no. 2
    def __init__(self, keyword, pcursor=0):

        self.pcursor = pcursor
        configs = config()
        self.headers = configs.HEADERS
        self.sig_data = configs.SIG_DATA
        self.sig_data.update({'keyword': keyword})
        self.salt = config().SALT
Example no. 3
def get_message_tag(incoming_message):
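    # Resolve a friendly server name for this guild from the config; fall back to the raw guild ID.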
    server_id = incoming_message.guild.id
    settings = config()
    server_name = str(server_id)
    for name, server_settings in settings['servers'].items():
        if server_settings['id'] == server_id:
            server_name = name
    return f"[{server_name}-#{incoming_message.channel}]"
Example no. 4
    async def get_response(self, message, bot):
        """Looks for commands in message and returns a response if a command is triggered"""
        settings = config(message.guild.id)

        for command in self.commands:
            if command.trigger(settings, message, bot):
                response = await command.get_response(settings, message, bot)
                if response and response != []:
                    return response
Example no. 5
def main():
    tornado.options.options.parse_command_line()
    application = Application()
    application.listen(port=80, address=None)  # ipv4 and v6
    tornado.ioloop.PeriodicCallback(
        application.db_ping,
        int(config().get('database', 'ping-interval')) * 1000).start()
    tornado.ioloop.PeriodicCallback(application.generate_username_cache,
                                    3600 * 1000).start()
    tornado.ioloop.IOLoop.instance().start()
Example no. 6
def connect():
    """ Connect to the PostgreSQL database server """
    conn = None
    try:
        # read connection parameters
        params = config()

        # connect to the PostgreSQL server
        print('Connecting to the PostgreSQL database...')
        conn = psycopg2.connect(**params)

        # create a cursor
        cur = conn.cursor()

        # execute a statement
        print('PostgreSQL database version:')
        cur.execute('SELECT version()')

        # execute a statement
        # print('PostgreSQL database table Creation')
        # cur.execute('CREATE TABLE users1(id integer PRIMARY KEY,email text,name text,address text)')
        # conn.commit()

        # insert a statement
        print('PostgreSQL data insertion')
        cur.execute("INSERT INTO users VALUES {}".format("(14, '*****@*****.**', 'Some Name', '123 Fake St.')"))
        conn.commit()
        # insert_query = "INSERT INTO users VALUES {}".format("(10, '*****@*****.**', 'Some Name', '123 Fake St.')")
        # cur.execute(insert_query)
        # conn.commit()

        # Load data from CSV
        with open('user_accounts.csv', 'r') as f:
            reader = csv.reader(f)
            next(reader)  # Skip the header row.
            for row in reader:
                cur.execute(
                    "INSERT INTO users VALUES (%s, %s, %s, %s)",
                    row
                )
        conn.commit()


        # display the PostgreSQL database server version
        # (re-run the query: the INSERTs above replaced the earlier SELECT's result set on this cursor)
        cur.execute('SELECT version()')
        db_version = cur.fetchone()
        print(db_version)

        # close the communication with the PostgreSQL
        cur.close()
    except (Exception, psycopg2.DatabaseError) as error:
        print(error)
    finally:
        if conn is not None:
            conn.close()
            print('Database connection closed.')
Example no. 7
    def __init__(self, keyword):
        self.keyword = keyword
        configs = config()

        self.pcursor = "0"
        self.headers = configs.HEADERS
        search_data = {
            'keyword': keyword,
        }
        self.sig_data = configs.SIG_DATA
        self.sig_data.update(search_data)
Example no. 8
    def __init__(self):

        self.headers = config().HEADERS
        #print(type(self.headers))
        self.sig_data_ini = config().SIG_DATA
        self.salt = config().SALT
        self.token_salt = config().TOKEN_SALT
        self.sig_data = config().SIG_DATA
        self.phone = config().PHONE_NUMBER
Example no. 9
def main():
    args = config()
    class_path = args.classes_path  # classes_path
    train_path = args.train_path  # train_path
    test_path = args.test_path  # test_path
    n_gram = args.n_gram  # ngram

    data = Data(train_path, hash=True, n_gram=n_gram)
    classes = Class(class_path)
    model = Fasttext_trainer(data,
                             classes,
                             dimension=300,
                             learning_rate=0.05,
                             epoches=10)
    test_model(model, test_path, data.gram2index, classes)
Example no. 10
    def __init__(self, targetTime=PQ.PhysicalQuantity(3., 's')):
        super(Demo13, self).__init__(targetTime=targetTime)

        self.thermal = demoapp.thermal_nonstat()
        self.mechanical = demoapp.mechanical()
        self.matPlotFig = None

        if (status):  # experimental section by bp
            from Config import config
            import Pyro4
            cfg = config(3)
            ns = PyroUtil.connectNameServer(nshost=cfg.nshost,
                                            nsport=cfg.nsport,
                                            hkey=cfg.hkey)
            uri = ns.lookup(cfg.monitorName)
            self.workflowMonitor = Pyro4.Proxy(uri)
        self.updateStatus('Initialized')
        if (status):
            time.sleep(10)
Example no. 11
def main():
    args = config()
    ns = args.ns  # value for ns
    part = args.part  # part or full
    ng_small = args.ng_small  # ngram_smallest_value
    ng_big = args.ng_big  # ngram_biggest_value
    use_subsample = args.subsample  # use_subsample or not

    corpus = Corpus(part, ng_small, ng_big, use_subsample)
    emb, _ = word2vec_trainer(corpus,
                              ns=ns,
                              dimension=64,
                              learning_rate=0.05,
                              iteration=50000)
    # Print similar words
    testwords = [
        "narrow-mindedness", "department", "campfires", "knowing", "urbanize",
        "imperfection", "principality", "abnormal", "secondary", "ungraceful"
    ]
    sim(testwords, corpus, emb)
Example no. 12
def main(name):
    cfg = config()
    env = gym.make(name).unwrapped
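    # Expose the environment's action/state dimensions on the config class for the DDPG agent to read.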
    setattr(cfg.__class__, 'n_action', env.action_space.shape[0])
    setattr(cfg.__class__, 'n_state', env.observation_space.shape[0])
    agent = DDPG(cfg)

    scores, critic_losses, actor_losses = list(), list(), list()

    for e in tqdm(range(cfg.max_epoches)):
        agent, critic_loss, actor_loss = train(env, cfg, agent)
        score = test(env, cfg, agent)

        scores.append(score)
        critic_losses.extend(critic_loss)
        actor_losses.extend(actor_loss)
        print('score: {:.3f}, critic_loss: {:.3f}, actor_loss: {:.3f}'.format(score, \
                                np.mean(critic_losses), \
                                np.mean(actor_losses)))

    plot(name, scores, critic_losses, actor_losses, cfg.plot_frequency)
Example no. 13
	def create(self):
		self.status_icon = gtk.StatusIcon()
		self.spring_logo_pixbuf = gtk.gdk.pixbuf_new_from_file(self.ini.get(self.profile, 'DOCKAPP', None))
		self.status_icon.set_from_file(self.ini.get(self.profile, 'DOCKAPP', None))
		self.status_icon.set_visible(True)


		self.map_index = index_map(self)
		self.map_index.check_if_update_needed()

		# Mod Index
		self.mod_index = index_mod(self)
		self.mod_index.check_if_update_needed()

		# Datadirs
		datadirs = self.unitsync_wrapper.datadirs_list()
		self.datadirs = []
		for i in range(0,len(datadirs)):
			if os.path.isdir(datadirs[i]) == True:
				self.datadirs.append(datadirs[i])

		# Classes
		self.battle = battle(self)
		self.lobby = gui_lobby(self)

		self.battle.IntegrateWithLobby(self)
		self.lobby.IntegrateWithBattle(self)

		self.lobby.Create()


		self.battle.Create()

		self.options = config(self)
		self.options.create()

		self.tooltip('Unity Lobby')
		self.blinking(True)
		self.status_icon.connect('activate', self.active)
		self.status_icon.connect('popup-menu', self.popup_menu)
Example no. 14
def connect():
    conn = None
    try:
        params = config()
        print('Connecting to PostgreSQL...')
        conn = pg.connect(**params)
        cur = conn.cursor()

        print('PostgreSQL database version:')
        cur.execute('SELECT version()')

        # display the PostgreSQL database server version
        db_version = cur.fetchone()
        print(db_version)
       
        # close the communication with the PostgreSQL
        cur.close()
    except (Exception, pg.DatabaseError) as error:
        print(error)
    finally:
        if conn is not None:
            conn.close()
            print('Database connection closed.')
Example no. 15
    def __init__(self):
        self.config = config()
        self.rounds = 1000
        if self.config.get('database', 'type') == 'sqlite':
            self.database = SqliteDatabase(self.config.get('database', 'host'))
            self.database.connect()
        elif self.config.get('database', 'type') == 'mysql':
            self.database = MySQLDatabase(
                self.config.get('database', 'database'),
                user=self.config.get('database', 'user'),
                passwd=self.config.get('database', 'password'),
                host=self.config.get('database', 'host'))
            self.database.connect()
        elif self.config.get('database', 'type') == 'postgresql':
            self.database = PostgresqlDatabase(
                self.config.get('database', 'database'),
                user=self.config.get('database', 'user'),
                passwd=self.config.get('database', 'password'),
                host=self.config.get('database', 'host'))
            self.database.connect()
        else:
            raise self.UnsupportedDatabaseType()

        class Users(Model):
            Username = CharField(null=False,
                                 unique=True,
                                 max_length=64,
                                 primary_key=True)
            Password = CharField(
                null=False,
                max_length=76)  # Length of what sha1_crypt produces
            API_Key = CharField(null=True,
                                default=None,
                                unique=True,
                                max_length=128)  # SHA256
            Session = CharField(null=True, max_length=128,
                                default=None)  # SHA256
            Is_Admin = BooleanField(default=False)

            class Meta:
                database = self.database

        class Servers(Model):
            ID = PrimaryKeyField()
            Address = CharField(max_length=39, null=False)
            Port = IntegerField(null=False)
            Owner = CharField(null=False, max_length=64)
            Memory = IntegerField(
                null=False
            )  # int as MB, translated to <memory>MB in start command
            ServerJar = CharField(max_length=128, null=False)
            Type = CharField(max_length=128,
                             null=False)  # craftbukkit, minecraft, etc.
            Stream = CharField(null=False,
                               max_length=128)  # rb, dev, beta etc.
            Is_Active = BooleanField(
                default=False
            )  # to get around a weird issue with sqlite and autoincrement, always make unique IDs, keep them unique regardless! mysql's default behaviour makes this a non-issue but sqlite :(

            class Meta:
                database = self.database

        class Backup_Destinations(Model):
            ID = PrimaryKeyField()
            FriendlyName = CharField(max_length=255, null=False, unique=False)
            Type = CharField(
                max_length=128, null=False
            )  # The method used for backing, e.g.: zip, rdiff-backup etc.
            Folder = TextField(
                null=False
            )  # if local then local folder, remote then remote folder etc.
            Host = CharField(
                max_length=255, null=True, default=None
            )  # if remote, then this is the host to use, as defined in ~/.ssh/config, which will contain private key, public key, hostname, port etc.
            # private key is required for passwordless authentication, passwords are a bad idea and NOT supported.
            Remote = BooleanField(null=False, default=False)
            Backup_Limit = IntegerField(null=False, default=0)

            class Meta:
                database = self.database

        class Roles(Model
                    ):  # Stores role name and ID used to reference it later
            ID = PrimaryKeyField()
            RoleName = CharField(null=False, max_length=128)

            class Meta:
                database = self.database

        class Permissions(
                Model
        ):  # Stores the name of permissions and a key for referencing them.
            # Name is friendly name for the permission
            ID = PrimaryKeyField()
            PermissionName = CharField(null=False, max_length=128)
            PermissionKey = CharField(null=False, unique=True, max_length=128)

            class Meta:
                database = self.database

        class Role_Permissions(Model):  # Assign permissions to roles
            ID = PrimaryKeyField()
            Role_ID = IntegerField(null=False)
            Permission_ID = IntegerField(null=False)

            class Meta:
                database = self.database

        class User_Roles(Model):  # Assign role to users per server
            ID = PrimaryKeyField()
            User_ID = IntegerField(null=False)
            Role_ID = IntegerField(null=False)
            Server_ID = IntegerField(null=False)

            class Meta:
                database = self.database

        # The ACL stuff is copied to a large extent from http://stackoverflow.com/a/10311479/2077881
        # This is the first time I've implemented an ACL system so posts such as those are extremely helpful for
        # identifying best practice.

        self.database.autocommit = True
        self.Servers = Servers
        self.Users = Users
        self.Roles = Roles
        self.Permissions = Permissions
        self.Role_Permissions = Role_Permissions
        self.User_Roles = User_Roles
        self.Backup_Destinations = Backup_Destinations

        try:
            self.Users.select().execute()
        except:
            self.initialiseDatabase()
Example no. 16
    def setup(self, progressbar):

        # Get Datadirs from unitsync
        datadir = self.ini.get(self.profile, "SPRING_DATADIR", None)
        if datadir != None:
            os.chdir(datadir)
        unitsync.Init(True, 1)
        self.datadirs = []
        datadirs = unitsync.GetDataDirectories(False)
        for i in range(0, len(datadirs)):
            if os.path.isdir(datadirs[i]) == True:
                self.datadirs.append(datadirs[i])

        # Map Index
        self.map_index = index_map(self.map_index_file, progressbar)
        if os.path.isfile(self.map_index_file) == False:
            self.map_index.create_index()
        self.map_index.check_if_update_needed()

        # Mod Index
        self.mod_index = index_mod(self.mod_index_file, progressbar)
        if os.path.isfile(self.mod_index_file) == False:
            self.mod_index.create_index()
        self.mod_index.check_if_update_needed()

        self.lobby_table = gtk.Table(rows=2, columns=2, homogeneous=False)
        self.gui_lobby = gui_lobby(self)
        self.config = config(self)
        self.battle = battle(self)

        # Main Window
        self.window.set_title("Unity Lobby")
        self.window.set_resizable(True)
        self.window.connect("delete-event", gtk.main_quit)
        self.window.add_events(gtk.gdk.BUTTON_PRESS_MASK)

        # Vertical Box Part 1/2
        vbox = gtk.VBox(False, 0)
        vbox.show()
        self.window.add(vbox)

        # Menu Part 1/2
        menu_bar = gtk.MenuBar()
        menu_bar.show()

        # Battle Notebook
        self.battle_notebook = gtk.Notebook()
        self.battle_notebook.set_tab_pos(gtk.POS_LEFT)
        self.battle.create(self.battle_notebook)

        # Options Notebook
        self.options_notebook = gtk.Notebook()
        self.options_notebook.set_tab_pos(gtk.POS_LEFT)
        self.config.create(self.options_notebook)

        # Vertical Box Part 2/2
        vbox.pack_start(menu_bar, False, False, 2)
        vbox.pack_start(self.battle_notebook, True, True, 2)
        vbox.pack_start(self.options_notebook, True, True, 2)
        vbox.pack_start(self.lobby_table, True, True, 2)

        self.window.show()

        # Menu Part 2/2
        # Menu Items
        battle_item = gtk.MenuItem("Battle")
        lobby_item = gtk.MenuItem("Lobby")
        config_item = gtk.MenuItem("Options")
        self.start_item = gtk.MenuItem("Start")
        menu_bar.append(battle_item)
        menu_bar.append(lobby_item)
        menu_bar.append(config_item)
        menu_bar.append(self.start_item)

        battle_item.show()
        lobby_item.show()
        self.start_item.set_right_justified(1)
        config_item.show()

        # Menu-Connect
        battle_item.connect("button_press_event", self.battle_section)
        config_item.connect("button_press_event", self.config_section)
        self.start_item.connect("button_press_event", self.battle.script_create)
        lobby_item.connect("button_press_event", self.lobby_section)

        # Main Window destroy event
        self.window.connect("delete-event", self.destroy)

        self.battle_section(None, None)
Example no. 17
import torch.backends.cudnn as cudnn
from Dataloader import Mydataset
import torch
import torch.nn as nn
from Config import config
import sys
import os
sys.path.append(os.getcwd())

opt = config()


class MyNetwork(nn.Module):
    def __init__(self, opt):
        super(MyNetwork, self).__init__()
        self.f1 = nn.Sequential(
            nn.Linear(opt.INPUT_SIZE, opt.HIDDEN_SIZE),
            nn.BatchNorm1d(opt.HIDDEN_SIZE),  # regularization
            nn.ReLU(),
            nn.Dropout(0.5),  # prevent overfitting: randomly drops 50% of the neurons
            nn.Linear(opt.HIDDEN_SIZE, opt.NUM_CLASS))

    def forward(self, x):
        outputs = self.f1(x)
        return outputs


dataset = Mydataset(opt)
net = MyNetwork(opt)
criterion = nn.CrossEntropyLoss()
Example no. 18
def get_logging_level():
    try:
        level = config()['logging']['level']
        return logging.getLevelName(level.upper())
    except KeyError:
        return logging.INFO
Example no. 19
from os import remove
import requests
import json
import pandas as pd
import smtplib
from Config import config
from matplotlib import pyplot as plt

from jinja2 import Environment, FileSystemLoader
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.image import MIMEImage

c = config('settings.conf')


def download_data(ticker):
    print(ticker)
    url = 'https://financialmodelingprep.com/api/v3/historical-price-full/%s?apikey=59fa8d8825c81ea3e22131b8c32a8cd1' % (
        ticker)
    response = requests.get(url)
    data = json.loads(response.text)['historical']
    dataframe_li = []
    for d in reversed(data):
        temp = [d['open'], d['high'], d['low'], d['close'], d['date']]
        dataframe_li.append(temp)

    df = pd.DataFrame(dataframe_li,
                      columns=['open', 'high', 'low', 'close', 'date'])
    return df
Example no. 20
 def __init__(self, app_name=None, config_file_path=None):
     log.Ilog("GW_APP Singleton instance __init__")
     self.app_name = app_name
     self.config = config(config_file_path=config_file_path)
Example no. 21
    def __init__(self):
        self.config = config()
        self.rounds = 1000
        if self.config.get('database', 'type') == 'sqlite':
            self.database = SqliteDatabase(self.config.get('database', 'host'))
            self.database.connect()
        elif self.config.get('database', 'type') == 'mysql':
            self.database = MySQLDatabase(self.config.get('database', 'database'),
                                          user=self.config.get('database', 'user'),
                                          passwd=self.config.get('database', 'password'),
                                          host=self.config.get('database', 'host'))
            self.database.connect()
        elif self.config.get('database', 'type') == 'postgresql':
            self.database = PostgresqlDatabase(self.config.get('database', 'database'),
                                               user=self.config.get('database', 'user'),
                                               passwd=self.config.get('database', 'password'),
                                               host=self.config.get('database', 'host'))
            self.database.connect()
        else:
            raise self.UnsupportedDatabaseType()

        class Users(Model):
            Username = CharField(null=False, unique=True, max_length=64, primary_key=True)
            Password = CharField(null=False, max_length=76)  # Length of what sha1_crypt produces
            API_Key = CharField(null=True, default=None, unique=True, max_length=128)  # SHA256
            Session = CharField(null=True, max_length=128, default=None)  # SHA256
            Is_Admin = BooleanField(default=False)

            class Meta:
                database = self.database

        class Servers(Model):
            ID = PrimaryKeyField()
            Address = CharField(max_length=39, null=False)
            Port = IntegerField(null=False)
            Owner = CharField(null=False, max_length=64)
            Memory = IntegerField(null=False)  # int as MB, translated to <memory>MB in start command
            ServerJar = CharField(max_length=128, null=False)
            Type = CharField(max_length=128, null=False)  # craftbukkit, minecraft, etc.
            Stream = CharField(null=False, max_length=128)  # rb, dev, beta etc.
            Is_Active = BooleanField(default=False)  # to get around a weird issue with sqlite and autoincrement, always make unique IDs, keep them unique regardless! mysql's default behaviour makes this a non-issue but sqlite :(

            class Meta:
                database = self.database

        class Backup_Destinations(Model):
            ID = PrimaryKeyField()
            FriendlyName = CharField(max_length=255, null=False, unique=False)
            Type = CharField(max_length=128, null=False) # The method used for backing, e.g.: zip, rdiff-backup etc.
            Folder = TextField(null=False) # if local then local folder, remote then remote folder etc.
            Host = CharField(max_length=255, null=True, default=None) # if remote, then this is the host to use, as defined in ~/.ssh/config, which will contain private key, public key, hostname, port etc.
            # private key is required for passwordless authentication, passwords are a bad idea and NOT supported.
            Remote = BooleanField(null=False, default=False)
            Backup_Limit = IntegerField(null=False, default=0)

            class Meta:
                database = self.database


        class Roles(Model):  # Stores role name and ID used to reference it later
            ID = PrimaryKeyField()
            RoleName = CharField(null=False, max_length=128)

            class Meta:
                database = self.database

        class Permissions(Model):  # Stores the name of permissions and a key for referencing them.
                                   # Name is friendly name for the permission
            ID = PrimaryKeyField()
            PermissionName = CharField(null=False, max_length=128)
            PermissionKey = CharField(null=False, unique=True, max_length=128)

            class Meta:
                database = self.database

        class Role_Permissions(Model):  # Assign permissions to roles
            ID = PrimaryKeyField()
            Role_ID = IntegerField(null=False)
            Permission_ID = IntegerField(null=False)

            class Meta:
                database = self.database

        class User_Roles(Model):  # Assign role to users per server
            ID = PrimaryKeyField()
            User_ID = IntegerField(null=False)
            Role_ID = IntegerField(null=False)
            Server_ID = IntegerField(null=False)

            class Meta:
                database = self.database

        # The ACL stuff is copied to a large extent from http://stackoverflow.com/a/10311479/2077881
        # This is the first time I've implemented an ACL system so posts such as those are extremely helpful for
        # identifying best practice.

        self.database.autocommit = True
        self.Servers = Servers
        self.Users = Users
        self.Roles = Roles
        self.Permissions = Permissions
        self.Role_Permissions = Role_Permissions
        self.User_Roles = User_Roles
        self.Backup_Destinations = Backup_Destinations

        try:
            self.Users.select().execute()
        except:
            self.initialiseDatabase()
Example no. 22
 def __init__(self):
     self.config = config()
     self.db = Libs.database.Database()
     self.authentication = Libs.authentication.Authentication(self)
     self.log = Libs.log.Log()
     self.session_cache = {}
     self.title = self.config.get('panel', 'title')
     self.name = self.config.get('panel', 'name')
     self.supervisor_config_path = self.config.get('supervisor', 'configuration')
     self.usernames = {}  # has some info about user, like is_admin etc. just to save useless SQL stuff
     self.generate_username_cache()
     self.supervisor_auth = {'Username': '', 'Password': ''}
     self.parse_supervisor_config()
     self.process_prefix = "minecraft_"
     self.craftbukkit_build_list = {}
     self.vanilla_build_list = {}
     self.supervisor = Supervisor(self.supervisor_auth['Username'], self.supervisor_auth['Password'])
     self.setup_bukkit_jar_cache()
     self.setup_vanilla_jar_cache()
     self.vanilla_builds = {}
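     # Route table as (name, URL pattern, handler class, kwargs) tuples; converted to URLSpec objects below.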
     handlers = [
         ('Home', r'/', Handlers.Home.IndexHandler, {'title': 'Home'}),
         ('Login', r'/login', Handlers.Home.LoginHandler, {'title': 'Login'}),
         ('PerformLogin', r'/ajax/performLogin', Handlers.Ajax.PerformLogin.PerformLoginHandler, {'title': None}),
         ('Logout', r'/logout', Handlers.Home.LogoutHandler, {'title': 'Logout'}),
         ('Admin_Home', r'/admin/?', Handlers.Admin.AdminController.Index, {'title': 'Admin Home'}),
         ('Admin_Users', r'/admin/users', Handlers.Admin.AdminController.Users, {'title': 'User Management'}),
         ('Admin_Roles', r'/admin/roles', Handlers.Admin.AdminController.Roles, {'title': 'Role Management'}),
         ('Admin_Ajax_GetUsers', r'/admin/ajax/getUsers', Handlers.Admin.Ajax.UserController.GetUser, {'title': None}),
         ('Admin_Ajax_AddUser', r'/admin/ajax/addUser', Handlers.Admin.Ajax.UserController.AddUser, {'title': None}),
         ('Admin_Ajax_DeleteUser', r'/admin/ajax/deleteUser', Handlers.Admin.Ajax.UserController.DeleteUser, {'title': None}),
         ('Admin_Ajax_EditUser', r'/admin/ajax/editUser', Handlers.Admin.Ajax.UserController.EditUser, {"title": None}),
         ('Servers_Index', r'/servers/?', Handlers.Servers.Index.ServersIndexHandler, {"title": "Servers"}),
         ('Server_Index', r'/servers/(\d+)/', Handlers.Servers.Server.ServerController.Index, {"title": "Server"}),
         ('Server_Players', r'/servers/(\d+)/players', Handlers.Servers.Server.ServerController.Players, {'title': 'Server Players'}),
         ('Servers_WebSocket_CreateServer', r'/servers/websocket/createServer', Handlers.Servers.WebSocket.CreateServer.CreateServerHandler, {'title': None}),
         ('Servers_Ajax_CheckAddress', r'/servers/ajax/checkAddress', Handlers.Servers.Ajax.CheckAddress.CheckAddressHandler, {'title': None}),
         ('Servers_Ajax_GetInfo', r'/servers/ajax/getInfo', Handlers.Servers.Ajax.GetInfo.GetInfoHandler, {'title': None}),
         ('Server_Console', r'/servers/(\d+)/console', Handlers.Servers.Server.ServerController.Console, {'title': 'Server Console'}),
         ('Server_Ajax_GetLog', r'/servers/(\d+)/ajax/getLog', Handlers.Servers.Server.Ajax.GetLog.GetLogHandler, {'title': None}),
         ('Server_Ajax_SendCommand', r'/servers/(\d+)/ajax/sendCommand', Handlers.Servers.Server.Ajax.SendCommand.SendCommandHandler, {'title': None}),
         ('Server_Properties', r'/servers/(\d+)/properties', Handlers.Servers.Server.ServerController.Properties, {'title': 'Server Properties'}),
         ('Servers_DeleteServer', r'/servers/ajax/deleteServer', Handlers.Servers.Ajax.ServerController.DeleteServer, {'title': None}),
         ('Server_Ajax_GetPlayers', r'/servers/(\d+)/ajax/getPlayers', Handlers.Servers.Server.Ajax.PlayerController.GetPlayers, {'title': None}),
         ('Server_Ajax_KickPlayer', r'/servers/(\d+)/ajax/kickPlayer', Handlers.Servers.Server.Ajax.PlayerController.KickPlayer, {'title': None}),
         ('Server_Ajax_BanPlayer', r'/servers/(\d+)/ajax/banPlayer', Handlers.Servers.Server.Ajax.PlayerController.BanPlayer, {'title': None}),
         ('Server_Ajax_GetBannedPlayers', r'/servers/(\d+)/ajax/getBannedPlayers', Handlers.Servers.Server.Ajax.PlayerController.GetBannedPlayers, {'title': None}),
         ('Server_Ajax_UnbanPlayer', r'/servers/(\d+)/ajax/unbanPlayer',  Handlers.Servers.Server.Ajax.PlayerController.UnbanPlayer, {'title': None}),
         ('Server_Ajax_GetOperators', r'/servers/(\d+)/ajax/getOperators', Handlers.Servers.Server.Ajax.PlayerController.GetOperators, {'title': None}),
         ('Server_Ajax_OpPlayer', r'/servers/(\d+)/ajax/opPlayer', Handlers.Servers.Server.Ajax.PlayerController.OpPlayer, {'title': None}),
         ('Server_Ajax_DeopPlayer', r'/servers/(\d+)/ajax/deopPlayer', Handlers.Servers.Server.Ajax.PlayerController.DeopPlayer, {'title': None}),
         ('User_Index', r'/user/?', Handlers.User.Index.UserIndexHandler, {'title': 'User Settings'}),
         ('Servers_Ajax_StartServer', r'/servers/ajax/startServer', Handlers.Servers.Ajax.ServerController.StartServer, {'title': None}),
         ('Servers_Ajax_StopServer', r'/servers/ajax/stopServer', Handlers.Servers.Ajax.ServerController.StopServer, {'title': None}),
         ('Server_Update', r'/servers/(\d+)/update', Handlers.Servers.Server.ServerController.Update, {'title': 'Server Update'}),
         ('Server_Ajax_Update', r'/servers/(\d+)/ajax/update', Handlers.Servers.Server.Ajax.Update.UpdateServerHandler, {'title': None}),
         ('Server_Ajax_GetProcessInfo', r'/servers/(\d+)/ajax/getProcessInfo', Handlers.Servers.Server.Ajax.GetProcessInfo.GetProcessInfoHandler, {'title': None}),
         ('Server_WebSocket_GetLog', r'/servers/(\d+)/websocket/getLog', Handlers.Servers.Server.WebSocket.LogController.GetLog, {'title': None}),
         ('Server_Backup', r'/servers/(\d+)/backup', Handlers.Servers.Server.ServerController.Backup, {'title': 'Server Backup'}),
         ('System_BackupDestinations', r'/system/backup', Handlers.System.BackupDestinations.SystemBackupDestinationsHandler, {'title': 'Backup Destinations'}),
         ('System_Backup_Index', r'/system/backup/(\d+)/', Handlers.System.Backup.Index.BackupIndexHandler, {'title': 'Backup Destination Index'}),
         ('System_Backup_Ajax_DeleteBackup', r'/system/backup/(\d+)/ajax/deleteBackup', Handlers.System.Backup.Ajax.DeleteBackup.DeleteBackupHandler, {'title': None}),
         ('Server_Ajax_BackupServer', r'/servers/(\d+)/ajax/backupServer', Handlers.Servers.Server.Ajax.BackupServer.BackupServerHandler, {'title': None}),
         ('Server_Ajax_DeleteBackup', r'/servers/(\d+)/ajax/deleteBackup', Handlers.Servers.Server.Ajax.DeleteBackup.DeleteBackupHandler, {'title': None}),
         ('Server_Settings', r'/servers/(\d+)/settings', Handlers.Servers.Server.ServerController.Settings, {'title': 'Server Settings'}),
     ]
     handlers = [URLSpec(pattern, handler, name=name, kwargs=kwargs) for name, pattern, handler, kwargs in handlers]
     settings = dict(
         debug=True,
         gzip=True,
         login_url='/login',
         template_path=os.path.join(os.path.dirname(__file__), 'templates'),
         static_path=os.path.join(os.path.dirname(__file__), 'static'),
     )
     tornado.web.Application.__init__(self, handlers, **settings)
Example no. 23
import tensorflow as tf
import os
import cv2
from Config import config
from model import VGG_model
from read_data import read_data, batch_read_data
from tensorflow.contrib.slim import nets
import numpy as np
con = config()
train_dir = con.train_dir
checkpoint_dir = con.checkpoint_dir
tensorboard_dir = con.tensorboard_dir
tensorboard_train_dir = con.tensorboard_train_dir

if not os.path.isdir(train_dir): os.mkdir(train_dir)
if not os.path.isdir(checkpoint_dir): os.mkdir(checkpoint_dir)
if not os.path.isdir(tensorboard_dir): os.mkdir(tensorboard_dir)
if not os.path.isdir(tensorboard_train_dir): os.mkdir(tensorboard_train_dir)


def train(X, Y, keep_prob):
    #train_image, test_image, train_label, test_label = read_data()
    train_image = np.load('train_image.npy')
    test_image = np.load('test_image.npy')
    train_label = np.load('train_label.npy')
    test_label = np.load('test_label.npy')
    num_batch = int((np.array(train_image).shape[0] - 1) / con.batch_size)
    model = VGG_model(X, Y, keep_prob)

    prediction = model.inference_op()
    print("prediction:{}".format(prediction))
Example no. 24
    def setup(self, progressbar):

        # Get Datadirs from unitsync
        datadir = self.ini.get(self.profile, 'SPRING_DATADIR', None)
        if datadir != None:
            os.chdir(datadir)
        unitsync.Init(True, 1)
        self.datadirs = []
        datadirs = unitsync.GetDataDirectories(False)
        for i in range(0, len(datadirs)):
            if os.path.isdir(datadirs[i]) == True:
                self.datadirs.append(datadirs[i])

        # Map Index
        self.map_index = index_map(self.map_index_file, progressbar)
        if os.path.isfile(self.map_index_file) == False:
            self.map_index.create_index()
        self.map_index.check_if_update_needed()

        # Mod Index
        self.mod_index = index_mod(self.mod_index_file, progressbar)
        if os.path.isfile(self.mod_index_file) == False:
            self.mod_index.create_index()
        self.mod_index.check_if_update_needed()

        self.lobby_table = gtk.Table(rows=2, columns=2, homogeneous=False)
        self.gui_lobby = gui_lobby(self)
        self.config = config(self)
        self.battle = battle(self)

        # Main Window
        self.window.set_title("Unity Lobby")
        self.window.set_resizable(True)
        self.window.connect("delete-event", gtk.main_quit)
        self.window.add_events(gtk.gdk.BUTTON_PRESS_MASK)

        # Vertical Box Part 1/2
        vbox = gtk.VBox(False, 0)
        vbox.show()
        self.window.add(vbox)

        # Menu Part 1/2
        menu_bar = gtk.MenuBar()
        menu_bar.show()

        # Battle Notebook
        self.battle_notebook = gtk.Notebook()
        self.battle_notebook.set_tab_pos(gtk.POS_LEFT)
        self.battle.create(self.battle_notebook)

        # Options Notebook
        self.options_notebook = gtk.Notebook()
        self.options_notebook.set_tab_pos(gtk.POS_LEFT)
        self.config.create(self.options_notebook)

        #Vertical Box Part 2/2
        vbox.pack_start(menu_bar, False, False, 2)
        vbox.pack_start(self.battle_notebook, True, True, 2)
        vbox.pack_start(self.options_notebook, True, True, 2)
        vbox.pack_start(self.lobby_table, True, True, 2)

        self.window.show()

        # Menu Part 2/2
        #Menu Items
        battle_item = gtk.MenuItem("Battle")
        lobby_item = gtk.MenuItem("Lobby")
        config_item = gtk.MenuItem("Options")
        self.start_item = gtk.MenuItem("Start")
        menu_bar.append(battle_item)
        menu_bar.append(lobby_item)
        menu_bar.append(config_item)
        menu_bar.append(self.start_item)

        battle_item.show()
        lobby_item.show()
        self.start_item.set_right_justified(1)
        config_item.show()

        #Menu-Connect
        battle_item.connect("button_press_event", self.battle_section)
        config_item.connect("button_press_event", self.config_section)
        self.start_item.connect("button_press_event",
                                self.battle.script_create)
        lobby_item.connect("button_press_event", self.lobby_section)

        # Main Window destroy event
        self.window.connect("delete-event", self.destroy)

        self.battle_section(None, None)
Example no. 25
 def run_bot(self):
     self.run(config()['bot']['token'])
Example no. 26
def testFunction():
    myCredentials = credentials("*****@*****.**", "testPassword")
    myConfig = config({'port1': True})
    NetworkHandler.updateNetworkConfig(myCredentials, myConfig)
    responseConfig = NetworkHandler.fetchNetworkConfig(myCredentials)
    print(responseConfig.toString())
Example no. 27
def main():
    args = config().parser.parse_args()
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        n_gpu = torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.distributed.init_process_group(backend='nccl')
    logger.info(
        "device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".
        format(device, n_gpu, bool(args.local_rank != -1), args.fp16))

    if args.gradient_accumulation_steps < 1:
        raise ValueError(
            "Invalid gradient_accumulation_steps parameter: {}, should be >= 1"
            .format(args.gradient_accumulation_steps))

    args.train_batch_size = int(args.train_batch_size /
                                args.gradient_accumulation_steps)
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)

    if not args.do_train and not args.do_predict:
        raise ValueError(
            "At least one of `do_train` or `do_predict` must be True.")

    if args.do_train:
        if not args.train_file:
            raise ValueError(
                "If `do_train` is True, then `train_file` must be specified.")
        raw_train_data = json.load(open(args.train_file, mode='r'))
    if args.do_predict:
        if not args.predict_file:
            raise ValueError(
                "If `do_predict` is True, then `predict_file` must be specified."
            )
        raw_test_data = json.load(open(args.predict_file, mode='r'))

    if os.path.exists(args.output_dir) == False:
        # raise ValueError("Output directory () already exists and is not empty.")
        os.makedirs(args.output_dir, exist_ok=True)

    import pickle as cPickle
    train_examples = None
    num_train_steps = None
    bert_config = BertConfig.from_json_file(args.bert_config_file)
    tokenizer = BertTokenizer(vocab_file=args.vocab_file,
                              do_lower_case=args.do_lower_case)
    if args.do_train:
        if os.path.exists("train_file_baseline.pkl"):
            train_examples = cPickle.load(
                open("train_file_baseline.pkl", mode='rb'))
        else:
            train_examples = read_examples(raw_train_data,
                                           tokenizer=tokenizer,
                                           doc_stride=args.doc_stride,
                                           max_seq_length=args.max_seq_length,
                                           is_training=True)
            cPickle.dump(train_examples,
                         open("train_file_baseline.pkl", mode='wb'))
        logger.info("train examples {}".format(len(train_examples)))
        num_train_steps = int(
            len(train_examples) / args.train_batch_size /
            args.gradient_accumulation_steps * args.num_train_epochs)

    # Prepare model
    model = BertForMultiChoice(bert_config)
    if args.init_checkpoint is not None:
        logger.info('load bert weight')
        state_dict = torch.load(args.init_checkpoint, map_location='cpu')
        missing_keys = []
        unexpected_keys = []
        error_msgs = []
        # copy state_dict so _load_from_state_dict can modify it
        metadata = getattr(state_dict, '_metadata', None)
        state_dict = state_dict.copy()
        # new_state_dict=state_dict.copy()
        # for kye ,value in state_dict.items():
        #     new_state_dict[kye.replace("bert","c_bert")]=value
        # state_dict=new_state_dict
        if metadata is not None:
            state_dict._metadata = metadata

        def load(module, prefix=''):
            local_metadata = {} if metadata is None else metadata.get(
                prefix[:-1], {})

            module._load_from_state_dict(state_dict, prefix, local_metadata,
                                         True, missing_keys, unexpected_keys,
                                         error_msgs)
            for name, child in module._modules.items():
                # logger.info("name {} chile {}".format(name,child))
                if child is not None:
                    load(child, prefix + name + '.')

        load(model, prefix='' if hasattr(model, 'bert') else 'bert.')
        logger.info("missing keys:{}".format(missing_keys))
        logger.info('unexpected keys:{}'.format(unexpected_keys))
        logger.info('error msgs:{}'.format(error_msgs))
    if args.fp16:
        model.half()
    model.to(device)
    if args.local_rank != -1:
        try:
            from apex.parallel import DistributedDataParallel as DDP
        except ImportError:
            raise ImportError(
                "Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training."
            )

        model = DDP(model)
    elif n_gpu > 1:
        model = torch.nn.DataParallel(model)

    # Prepare optimizer
    param_optimizer = list(model.named_parameters())

    # hack to remove pooler, which is not used
    # thus it produce None grad that break apex
    param_optimizer = [n for n in param_optimizer if 'pooler' not in n[0]]

    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
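    # Apply weight decay to all parameters except biases and LayerNorm weights.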
    optimizer_grouped_parameters = [{
        'params':
        [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
        'weight_decay':
        0.01
    }, {
        'params':
        [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
        'weight_decay':
        0.0
    }]

    t_total = num_train_steps
    if args.local_rank != -1:
        t_total = t_total // torch.distributed.get_world_size()
    if args.fp16:
        try:
            from apex.optimizers import FP16_Optimizer
            from apex.optimizers import FusedAdam
        except ImportError:
            raise ImportError(
                "Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training."
            )

        optimizer = FusedAdam(optimizer_grouped_parameters,
                              lr=args.learning_rate,
                              bias_correction=False,
                              max_grad_norm=1.0)
        if args.loss_scale == 0:
            optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
        else:
            optimizer = FP16_Optimizer(optimizer,
                                       static_loss_scale=args.loss_scale)
    else:
        optimizer = BertAdam(optimizer_grouped_parameters,
                             lr=args.learning_rate,
                             warmup=args.warmup_proportion,
                             t_total=t_total)

    global_step = 0
    if args.do_train:
        cached_train_features_file = args.train_file + '_{0}_{1}_v{2}'.format(
            str(args.max_seq_length), str(args.doc_stride), str(1))
        train_features = None
        try:
            with open(cached_train_features_file, "rb") as reader:
                train_features = pickle.load(reader)
        except:
            train_features = convert_examples_to_features(
                examples=train_examples,
                tokenizer=tokenizer,
                max_seq_length=args.max_seq_length,
                is_training=True)

            if args.local_rank == -1 or torch.distributed.get_rank() == 0:
                logger.info("  Saving train features into cached file %s",
                            cached_train_features_file)
                with open(cached_train_features_file, "wb") as writer:
                    pickle.dump(train_features, writer)
        logger.info("***** Running training *****")
        logger.info("  Num orig examples = %d", len(train_examples))
        logger.info("  Num split examples = %d", len(train_features))
        logger.info("  Batch size = %d", args.train_batch_size)
        logger.info("  Num steps = %d", num_train_steps)

        all_input_ids = torch.tensor([f.input_ids for f in train_features],
                                     dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask for f in train_features],
                                      dtype=torch.long)
        all_segment_ids = torch.tensor([f.segment_ids for f in train_features],
                                       dtype=torch.long)
        all_choice_positions = torch.tensor(
            [f.choice_positions for f in train_features], dtype=torch.long)
        all_answer_positions = torch.tensor(
            [f.answer_positions for f in train_features], dtype=torch.long)
        all_choice_positions_mask = torch.tensor(
            [f.choice_positions_mask for f in train_features],
            dtype=torch.long)
        all_answer_positions_mask = torch.tensor(
            [f.answer_positions_mask for f in train_features],
            dtype=torch.long)
        all_choice_labels = torch.tensor(
            [f.choice_labels for f in train_features], dtype=torch.long)
        all_choice_labels_for_consine = torch.tensor(
            [f.choice_labels_for_consine for f in train_features],
            dtype=torch.long)

        train_data = TensorDataset(
            all_input_ids, all_input_mask, all_segment_ids,
            all_choice_positions, all_answer_positions,
            all_choice_positions_mask, all_answer_positions_mask,
            all_choice_labels, all_choice_labels_for_consine)
        if args.local_rank == -1:
            train_sampler = RandomSampler(train_data)
        else:
            train_sampler = DistributedSampler(train_data)
        train_dataloader = DataLoader(train_data,
                                      sampler=train_sampler,
                                      batch_size=args.train_batch_size,
                                      drop_last=True)

        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            model.zero_grad()
            epoch_iterator = tqdm(train_dataloader, disable=None)
            for step, batch in enumerate(epoch_iterator):
                if n_gpu == 1:
                    batch = tuple(
                        t.to(device)
                        for t in batch)  # multi-gpu does scattering it-self
                input_ids, input_mask, segment_ids, choice_positions, answer_positions, choice_positions_mask, answer_positions_mask, choice_labels, choice_labels_for_consine = batch
                loss1, loss2 = model(input_ids,
                                     input_mask,
                                     segment_ids,
                                     choice_positions,
                                     answer_positions,
                                     choice_positions_mask,
                                     answer_positions_mask,
                                     choice_labels,
                                     choice_labels_for_consine,
                                     limit_loss=True)
                if loss2 is not None:
                    loss = loss1 + loss2
                else:
                    loss = loss1
                if n_gpu > 1:
                    loss1 = loss1.mean()
                    if loss2 is not None:
                        loss2 = loss2.mean()
                    loss = loss.mean()  # mean() to average on multi-gpu.
                if args.gradient_accumulation_steps > 1:
                    loss = loss / args.gradient_accumulation_steps
                if args.fp16:
                    optimizer.backward(loss)
                else:
                    loss.backward()
                if (step + 1) % args.gradient_accumulation_steps == 0:
                    # modify learning rate with special warm up BERT uses
                    lr_this_step = args.learning_rate * warmup_linear(
                        global_step / t_total, args.warmup_proportion)
                    for param_group in optimizer.param_groups:
                        param_group['lr'] = lr_this_step
                    optimizer.step()
                    optimizer.zero_grad()
                    global_step += 1

                if (step + 1) % 1 == 0:
                    if loss2 is not None:
                        logger.info(
                            "step: {} #### loss1: {}  loss2: {}".format(
                                step,
                                loss1.cpu().item(),
                                loss2.cpu().item()))
                    else:
                        logger.info("step: {} #### loss1: {}".format(
                            step,
                            loss1.cpu().item()))

    # Save a trained model
    output_model_file = os.path.join(args.output_dir, "pytorch_model.bin")
    if args.do_train:
        model_to_save = model.module if hasattr(
            model, 'module') else model  # Only save the model it-self
        torch.save(model_to_save.state_dict(), output_model_file)

    # Load a trained model that you have fine-tuned
    model_state_dict = torch.load(output_model_file)
    model = BertForMultiChoice(bert_config)
    model.load_state_dict(model_state_dict)
    model.to(device)
    if n_gpu > 1:
        model = torch.nn.DataParallel(model)

    if args.do_predict and (args.local_rank == -1
                            or torch.distributed.get_rank() == 0):
        eval_examples = read_examples(raw_test_data,
                                      tokenizer=tokenizer,
                                      doc_stride=args.doc_stride,
                                      max_seq_length=args.max_seq_length,
                                      is_training=False)
        # eval_examples=eval_examples[:100]
        eval_features = convert_examples_to_features(
            examples=eval_examples,
            tokenizer=tokenizer,
            max_seq_length=args.max_seq_length,
            is_training=False)

        logger.info("***** Running predictions *****")
        logger.info("  Num orig examples = %d", len(eval_examples))
        logger.info("  Num split examples = %d", len(eval_features))
        logger.info("  Batch size = %d", args.predict_batch_size)

        all_input_ids = torch.tensor([f.input_ids for f in eval_features],
                                     dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask for f in eval_features],
                                      dtype=torch.long)
        all_segment_ids = torch.tensor([f.segment_ids for f in eval_features],
                                       dtype=torch.long)
        all_choice_positions = torch.tensor(
            [f.choice_positions for f in eval_features], dtype=torch.long)
        all_answer_positions = torch.tensor(
            [f.answer_positions for f in eval_features], dtype=torch.long)
        all_choice_positions_mask = torch.tensor(
            [f.choice_positions_mask for f in eval_features], dtype=torch.long)
        all_answer_positions_mask = torch.tensor(
            [f.answer_positions_mask for f in eval_features], dtype=torch.long)

        all_example_index = torch.arange(all_input_ids.size(0),
                                         dtype=torch.long)

        eval_data = TensorDataset(all_input_ids, all_input_mask,
                                  all_segment_ids, all_choice_positions,
                                  all_answer_positions,
                                  all_choice_positions_mask,
                                  all_answer_positions_mask, all_example_index)
        eval_sampler = SequentialSampler(eval_data)
        eval_dataloader = DataLoader(eval_data,
                                     sampler=eval_sampler,
                                     batch_size=args.predict_batch_size)

        model.eval()
        all_results = []
        logger.info("Start evaluating")

        for input_ids, input_mask, segment_ids, choice_positions, answer_positions, choice_positions_mask, answer_positions_mask, example_indices in tqdm(
                eval_dataloader, desc="Evaluating", disable=None):
            if len(all_results) % 1000 == 0:
                logger.info("Processing example: %d" % (len(all_results)))
            input_ids = input_ids.to(device)
            input_mask = input_mask.to(device)
            segment_ids = segment_ids.to(device)
            choice_positions = choice_positions.to(device)
            answer_positions = answer_positions.to(device)
            choice_positions_mask = choice_positions_mask.to(device)
            answer_positions_mask = answer_positions_mask.to(device)
            with torch.no_grad():
                batch_probs = model(input_ids, input_mask, segment_ids,
                                    choice_positions, answer_positions,
                                    choice_positions_mask,
                                    answer_positions_mask)  # [24, n]
            for i, example_index in enumerate(example_indices):
                probs = batch_probs[i].detach().cpu().tolist()
                eval_feature = eval_features[example_index.item()]
                unique_id = int(eval_feature.unique_id)
                all_results.append(RawResult(unique_id=unique_id,
                                             logits=probs))
        output_prediction_file = os.path.join(args.output_dir,
                                              "predictions.json")

        write_predictions(eval_examples, eval_features, all_results,
                          args.max_answer_length, output_prediction_file)
Example no. 28
            train = data[~data.log_total_price.isnull()]
            print(test.head(5))
            print(train.head(5))
    #         train.to_csv(train1.csv)
    #         test.to_csv(test1.csv)
        except:
            print('Error: export')
    def run(self):
        if not self.load_data():
            print('Error')
        if not self.feature_transform():
            print('Error')
        if not self.category():
            print('Error')
        if not self.extend_feature():
            print('Error')
        if not self.export():
            print('Error')
        
if __name__ == '__main__':
    cf = config()
    if len(sys.argv) >= 2:
        path = sys.argv[1]
        du = data_process(cf, path)
        result = du.run()
#         if result:
#             print(du.Result)
#         else:
#             du.show_log()
        
        
Example no. 29
    def __init__(self,
                 workdir='',
                 targetTime=PQ.PhysicalQuantity('0 s'),
                 execMode=0,
                 modelID=1):
        """
        Initializes the workflow. As the workflow is non-stationary, we allocate individual
        applications and store them within a class.
        """
        super(VPSWorkflow, self).__init__(file='',
                                          workdir='',
                                          targetTime=targetTime)

        if modelID == 1:  # Airbus fuselage failure analysis
            #list of recognized input property IDs
            self.myInputPropIDs = [
                PropertyID.PID_ESI_VPS_PLY1_E0t1,
                PropertyID.PID_ESI_VPS_PLY1_E0t2,
                PropertyID.PID_ESI_VPS_PLY1_E0t3,
                PropertyID.PID_ESI_VPS_PLY1_E0c1,
                PropertyID.PID_ESI_VPS_PLY1_G012,
                PropertyID.PID_ESI_VPS_PLY1_G023,
                PropertyID.PID_ESI_VPS_PLY1_G013,
                PropertyID.PID_ESI_VPS_PLY1_NU12,
                PropertyID.PID_ESI_VPS_PLY1_NU23,
                PropertyID.PID_ESI_VPS_PLY1_NU13
            ]
            #list of recognized output property IDs
            self.myOutPropIDs = [
                PropertyID.PID_ESI_VPS_MOMENT_CURVE,
                PropertyID.PID_ESI_VPS_MOMENT_CURVE,
                PropertyID.PID_ESI_VPS_ROTATION_CURVE,
                PropertyID.PID_ESI_VPS_FIRST_FAILURE_MOM,
                PropertyID.PID_ESI_VPS_FIRST_FAILURE_ROT,
                PropertyID.PID_ESI_VPS_FIRST_FAILURE_LOC,
                PropertyID.PID_ESI_VPS_FIRST_FAILURE_ELE,
                PropertyID.PID_ESI_VPS_FIRST_FAILURE_PLY,
                PropertyID.PID_ESI_VPS_FIRST_FAILURE_PART
            ]
        elif modelID == 2:  # Airbus fuselage static analysis
            #list of recognized input property IDs
            self.myInputPropIDs = [
                PropertyID.PID_ESI_VPS_PLY1_E0t1,
                PropertyID.PID_ESI_VPS_PLY1_E0t2,
                PropertyID.PID_ESI_VPS_PLY1_E0t3,
                PropertyID.PID_ESI_VPS_PLY1_E0c1,
                PropertyID.PID_ESI_VPS_PLY1_G012,
                PropertyID.PID_ESI_VPS_PLY1_G023,
                PropertyID.PID_ESI_VPS_PLY1_G013,
                PropertyID.PID_ESI_VPS_PLY1_NU12,
                PropertyID.PID_ESI_VPS_PLY1_NU23,
                PropertyID.PID_ESI_VPS_PLY1_NU13
            ]
            #list of recognized output property IDs
            self.myOutPropIDs = [
                PropertyID.PID_ESI_VPS_MOMENT_CURVE,
                PropertyID.PID_ESI_VPS_MOMENT_CURVE,
                PropertyID.PID_ESI_VPS_ROTATION_CURVE,
                PropertyID.PID_ESI_VPS_FIRST_FAILURE_MOM,
                PropertyID.PID_ESI_VPS_FIRST_FAILURE_ROT,
                PropertyID.PID_ESI_VPS_FIRST_FAILURE_LOC,
                PropertyID.PID_ESI_VPS_FIRST_FAILURE_ELE,
                PropertyID.PID_ESI_VPS_FIRST_FAILURE_PLY,
                PropertyID.PID_ESI_VPS_FIRST_FAILURE_PART
            ]
        elif modelID == 3:  # Airbus fuselage buckling analysis
            #list of recognized input property IDs
            self.myInputPropIDs = [
                PropertyID.PID_ESI_VPS_PLY1_E0t1,
                PropertyID.PID_ESI_VPS_PLY1_E0t2,
                PropertyID.PID_ESI_VPS_PLY1_E0t3,
                PropertyID.PID_ESI_VPS_PLY1_E0c1,
                PropertyID.PID_ESI_VPS_PLY1_G012,
                PropertyID.PID_ESI_VPS_PLY1_G023,
                PropertyID.PID_ESI_VPS_PLY1_G013,
                PropertyID.PID_ESI_VPS_PLY1_NU12,
                PropertyID.PID_ESI_VPS_PLY1_NU23,
                PropertyID.PID_ESI_VPS_PLY1_NU13
            ]
            #list of recognized output property IDs
            self.myOutPropIDs = [PropertyID.PID_ESI_VPS_BUCKL_LOAD]
        else:
            log.debug("Unknown model ID, exiting.")

        # list of compulsory IDs
        self.myCompulsoryPropIDs = self.myInputPropIDs

        #dictionary of input properties (values)
        self.myInputProps = {}
        #dictionary of output properties (values)
        self.myOutProps = {}

        # Allocate VPS API
        self.VPS_API = None
        if execMode == 0:
            try:
                # Allocate local VPS API instance
                self.VPS_API = VPS_API(workdir=workdir, modelID=modelID)
                log.info('Created ESI VPS local application interface')
            except Exception as err:
                log.exception("Allocating local VPS API failed: " + repr(err))
        elif execMode == 1:
            # Get configuration
            cfg = config(mode=2)

            #locate nameserver
            ns = PyroUtil.connectNameServer(cfg.nshost, cfg.nsport, cfg.hkey)
            #connect to JobManager running on (remote) server
            self.vpsJobMan = PyroUtil.connectJobManager(
                ns, cfg.jobManName, cfg.hkey)

            # Allocate remote ESI VPS instance
            try:
                self.VPS_API = PyroUtil.allocateApplicationWithJobManager(
                    ns, self.vpsJobMan, None, cfg.hkey, sshContext=None)
                log.info('Created ESI VPS remote application interface')
            except Exception as err:
                log.exception("Allocating VPS jobmanager failed: " + repr(err))
            else:
                if self.VPS_API is not None:
                    VPS_APISignature = self.VPS_API.getApplicationSignature()
                    log.info("Working ESI VPS solver on server " +
                             VPS_APISignature)
                else:
                    log.debug("Connection to server failed, exiting.")
Esempio n. 30
0
import os
from Config import config
import cv2
import numpy as np
from sklearn.model_selection import train_test_split

con = config()


def batch_read_data(train_image, train_label):
    train_image = np.array(train_image)
    train_label = np.array(train_label)
    print("train_image.shape:{}".format(train_image.shape))
    print("train_label.shape:{}".format(train_label.shape))
    data_len = np.array(train_image).shape[0]
    num_batch = int((data_len - 1) / con.batch_size)
    print("num_batch:{}".format(num_batch))
    indices = np.random.permutation(np.arange(data_len))
    x_shuffle = train_image[indices]
    y_shuffle = train_label[indices]
    for i in range(num_batch):
        start_id = i * con.batch_size
        end_id = min((i + 1) * con.batch_size, data_len)
        yield x_shuffle[start_id:end_id], y_shuffle[start_id:end_id]


def read_data():
    path = con.path
    if os.path.exists(path):
        file_name = os.listdir(path)
        print(file_name)
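batch_read_data above shuffles both arrays with a shared permutation and then yields consecutive slices of con.batch_size samples; note that num_batch = int((data_len - 1) / con.batch_size) drops the final partial batch. A minimal usage sketch (train_image and train_label are assumed to be equally long lists or arrays; building them is not part of the snippet shown):

# Illustrative consumption of the batch generator defined above.
for batch_x, batch_y in batch_read_data(train_image, train_label):
    # each pair holds up to con.batch_size images and their labels
    print(batch_x.shape, batch_y.shape)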
Esempio n. 31
0
import os
import sys
sys.path.extend(['..', '../../..'])
from mupif import *
Util.changeRootLogger('server.log')
import argparse
# Read int for mode as number behind '-m' argument: 0-local (default), 1-ssh, 2-VPN
mode = argparse.ArgumentParser(
    parents=[Util.getParentParser()]).parse_args().mode
from Config import config
cfg = config(mode)

# locate nameserver
ns = PyroUtil.connectNameServer(nshost=cfg.nshost,
                                nsport=cfg.nsport,
                                hkey=cfg.hkey)
# Run a daemon for jobManager on this machine
daemon = PyroUtil.runDaemon(host=cfg.server,
                            port=cfg.serverPort,
                            nathost=cfg.serverNathost,
                            natport=cfg.serverNatport,
                            hkey=cfg.hkey)

# Run job manager on a server
jobMan = SimpleJobManager.SimpleJobManager2(
    daemon, ns, None, cfg.jobManName, cfg.portsForJobs, cfg.jobManWorkDir,
    os.getcwd(), 'serverConfig', mode, cfg.jobMan2CmdPath, cfg.maxJobs,
    cfg.socketApps)

PyroUtil.runJobManagerServer(server=cfg.server,
                             port=cfg.serverPort,
Esempio n. 32
0
import tensorflow as tf
import numpy as np
from Config import config
params = config()


class VGG_model(object):
    def __init__(self, input, output, keep_prob):
        self.X = input
        self.Y = output
        self.keep_prob = keep_prob
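
    # NOTE: the original snippet calls self.conv_op and self.mpool_op but does
    # not include their definitions. The two methods below are a minimal
    # reconstruction sketch assuming the classic TF1-style VGG helpers
    # (tf.get_variable / tf.nn.conv2d / tf.nn.max_pool); they are not taken
    # from the original source.
    def conv_op(self, input_op, name, kh, kw, n_out, dh, dw):
        # number of input channels of the incoming feature map
        n_in = input_op.get_shape()[-1].value
        with tf.variable_scope(name):
            kernel = tf.get_variable("w",
                                     shape=[kh, kw, n_in, n_out],
                                     dtype=tf.float32,
                                     initializer=tf.truncated_normal_initializer(stddev=0.1))
            biases = tf.get_variable("b",
                                     shape=[n_out],
                                     dtype=tf.float32,
                                     initializer=tf.zeros_initializer())
            conv = tf.nn.conv2d(input_op, kernel,
                                strides=[1, dh, dw, 1], padding="SAME")
            return tf.nn.relu(tf.nn.bias_add(conv, biases), name=name)

    def mpool_op(self, input_op, name, kh, kw, dh, dw):
        # plain max pooling with the kernel/stride sizes used in inference_op
        return tf.nn.max_pool(input_op,
                              ksize=[1, kh, kw, 1],
                              strides=[1, dh, dw, 1],
                              padding="SAME",
                              name=name)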

    def inference_op(self):
        # block 1 -- outputs 112x112x64
        conv1_1 = self.conv_op(self.X,
                               name="conv1_1",
                               kh=3,
                               kw=3,
                               n_out=64,
                               dh=1,
                               dw=1)
        conv1_2 = self.conv_op(conv1_1,
                               name="conv1_2",
                               kh=3,
                               kw=3,
                               n_out=64,
                               dh=1,
                               dw=1)
        pool1 = self.mpool_op(conv1_2, name="pool1", kh=2, kw=2, dw=2, dh=2)

        # block 2 -- outputs 56x56x128