Example #1
def main():
    args = make_args()
    config = configparser.ConfigParser()
    utils.load_config(config, args.config)
    for cmd in args.modify:
        utils.modify_config(config, cmd)
    with open(os.path.expanduser(os.path.expandvars(args.logging)), 'r') as f:
        logging.config.dictConfig(yaml.load(f))
    model_dir = utils.get_model_dir(config)
    path, step, epoch = utils.train.load_model(model_dir)
    state_dict = torch.load(path, map_location=lambda storage, loc: storage)
    mapper = [(inflection.underscore(name), member()) for name, member in inspect.getmembers(importlib.machinery.SourceFileLoader('', __file__).load_module()) if inspect.isclass(member)]
    path = os.path.join(model_dir, os.path.basename(os.path.splitext(__file__)[0])) + '.xlsx'
    with xlsxwriter.Workbook(path, {'strings_to_urls': False, 'nan_inf_to_errors': True}) as workbook:
        worksheet = workbook.add_worksheet(args.worksheet)
        for j, (key, m) in enumerate(mapper):
            worksheet.write(0, j, key)
            for i, (name, variable) in enumerate(state_dict.items()):
                value = m(name, variable)
                worksheet.write(1 + i, j, value)
            if hasattr(m, 'format'):
                m.format(workbook, worksheet, i, j)
        worksheet.autofilter(0, 0, i, len(mapper) - 1)
        worksheet.freeze_panes(1, 0)
    logging.info(path)
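The helpers called above, utils.load_config(config, args.config) and utils.modify_config(config, cmd), are not shown on this page. A minimal sketch of what they might look like, assuming args.config is a whitespace-separated list of INI paths and each modify command uses a hypothetical section/option=value syntax (both assumptions, not taken from the project):

import os


def load_config(config, paths):
    # Read one or more INI files into the supplied ConfigParser,
    # expanding ~ and environment variables in each path.
    for path in paths.split():
        config.read(os.path.expanduser(os.path.expandvars(path)))


def modify_config(config, cmd):
    # Apply a single override of the assumed form "section/option=value".
    key, value = cmd.split('=', 1)
    section, option = key.split('/', 1)
    if not config.has_section(section):
        config.add_section(section)
    config.set(section, option, value)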
Example #2
def main():
    args = make_args()
    config = configparser.ConfigParser()
    utils.load_config(config, args.config)
    for cmd in args.modify:
        utils.modify_config(config, cmd)
    with open(os.path.expanduser(os.path.expandvars(args.logging)), 'r') as f:
        logging.config.dictConfig(yaml.load(f))
    model_dir = utils.get_model_dir(config)
    height, width = tuple(map(int, config.get('image', 'size').split()))
    resize = transform.parse_transform(config, config.get('transform', 'resize_test'))
    transform_image = transform.get_transform(config, config.get('transform', 'image_test').split())
    transform_tensor = transform.get_transform(config, config.get('transform', 'tensor').split())
    # load image
    image_bgr = cv2.imread('image.jpg')
    image_resized = resize(image_bgr, height, width)
    image = transform_image(image_resized)
    tensor = transform_tensor(image).unsqueeze(0)
    # Caffe2
    init_net = caffe2_pb2.NetDef()
    with open(os.path.join(model_dir, 'init_net.pb'), 'rb') as f:
        init_net.ParseFromString(f.read())
    predict_net = caffe2_pb2.NetDef()
    with open(os.path.join(model_dir, 'predict_net.pb'), 'rb') as f:
        predict_net.ParseFromString(f.read())
    p = workspace.Predictor(init_net, predict_net)
    results = p.run([tensor.numpy()])
    logging.info(utils.abs_mean(results[0]))
    logging.info(hashlib.md5(results[0].tostring()).hexdigest())
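Example #2 logs utils.abs_mean(results[0]) and an MD5 digest as a quick fingerprint of the Caffe2 output. abs_mean is not shown here; a one-line sketch, assuming it is simply the mean of the absolute values of a NumPy array:

import numpy as np


def abs_mean(a):
    # Cheap scalar fingerprint for comparing tensors across runtimes.
    return np.abs(a).mean()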
Example #3
def main():
    args = make_args()
    config = configparser.ConfigParser()
    utils.load_config(config, args.config)
    for cmd in args.modify:
        utils.modify_config(config, cmd)
    with open(os.path.expanduser(os.path.expandvars(args.logging)), 'r') as f:
        logging.config.dictConfig(yaml.load(f))
    cache_dir = utils.get_cache_dir(config)
    os.makedirs(cache_dir, exist_ok=True)
    shutil.copyfile(os.path.expanduser(os.path.expandvars(config.get('cache', 'category'))), os.path.join(cache_dir, 'category'))
    category = utils.get_category(config)
    category_index = dict([(name, i) for i, name in enumerate(category)])
    datasets = config.get('cache', 'datasets').split()
    for phase in args.phase:
        path = os.path.join(cache_dir, phase) + '.pkl'
        logging.info('save cache file: ' + path)
        data = []
        for dataset in datasets:
            logging.info('load %s dataset' % dataset)
            module, func = dataset.rsplit('.', 1)
            module = importlib.import_module(module)
            func = getattr(module, func)
            data += func(config, path, category_index)
        if config.getboolean('cache', 'shuffle'):
            random.shuffle(data)
        with open(path, 'wb') as f:
            pickle.dump(data, f)
    logging.info('%s data are saved into %s' % (str(args.phase), cache_dir))
Example #4
File: qsx.py Project: JulianEberius/qsx
 def init(self):
     self = super(QSX, self).init()
     self.injected_apps = []
     self.groups = []
     self.active_space = None
     self.active_group = None
     self.setup_observers()
     hotkeys.init_default_qsx_hotkeys(self)
     utils.load_config()
     return self
Example #5
def main():
    args = make_args()
    config = configparser.ConfigParser()
    utils.load_config(config, args.config)
    for cmd in args.modify:
        utils.modify_config(config, cmd)
    with open(os.path.expanduser(os.path.expandvars(args.logging)), 'r') as f:
        logging.config.dictConfig(yaml.load(f))
    model_dir = utils.get_model_dir(config)
    init_net = onnx_caffe2.helper.load_caffe2_net(os.path.join(model_dir, 'init_net.pb'))
    predict_net = onnx_caffe2.helper.load_caffe2_net(os.path.join(model_dir, 'predict_net.pb'))
    benchmark = onnx_caffe2.helper.benchmark_caffe2_model(init_net, predict_net)
    logging.info('benchmark=%f(milliseconds)' % benchmark)
Example #6
def main():
    args = make_args()
    config = configparser.ConfigParser()
    utils.load_config(config, args.config)
    for cmd in args.modify:
        utils.modify_config(config, cmd)
    with open(os.path.expanduser(os.path.expandvars(args.logging)), 'r') as f:
        logging.config.dictConfig(yaml.load(f))
    model_dir = utils.get_model_dir(config)
    model = onnx.load(model_dir + '.onnx')
    onnx.checker.check_model(model)
    init_net, predict_net = onnx_caffe2.backend.Caffe2Backend.onnx_graph_to_caffe2_net(model.graph, device='CPU')
    onnx_caffe2.helper.save_caffe2_net(init_net, os.path.join(model_dir, 'init_net.pb'))
    onnx_caffe2.helper.save_caffe2_net(predict_net, os.path.join(model_dir, 'predict_net.pb'), output_txt=True)
    logging.info(model_dir)
Example #7
File: mysql.py Project: Abriko/pyLAMP
def check_site_id(arg=None):
	import db
	config = utils.load_config()

	data = db.Connection(host='127.0.0.1',database='lamp',user=config['lampuser'],password=config['lamppass'])

	sid = -1
	sites = data.query('SELECT id, domain, site_root, alias FROM lamp.lamp__sites ORDER BY id')

	if arg and arg[0].isdigit():
		sid = arg[0]


	while True:

		if sid == -1:
			print 'ID\tdomain\t\tpath'
			for s in sites:
				print '%s\t%s \t%s' % (s.id, s.domain, s.site_root)
			sid = raw_input('[ID]:')

		for s in sites:
			if str(s.id) == sid:
				return s
		sid = -1
Example #8
File: mysql.py Project: Abriko/pyLAMP
def check_mysql_id(arg):
	import db
	config = utils.load_config()

	data = db.Connection(host='127.0.0.1',database='lamp',user=config['lampuser'],password=config['lamppass'])

	mid = -1
	# get all mysql user
	mysqls = data.query('SELECT m.id, m.site_id, m.login_name, s.domain FROM lamp__mysql AS m, lamp__sites AS s WHERE m.site_id = s.id ORDER BY m.id')

	if arg and arg[0].isdigit():
		mid = arg[0]


	while True:

		if mid == -1:
			print '\nID\tuser\t\tsite'
			for m in mysqls:
				print '%s \t%s  \t%s' % (m.id, m.login_name, m.domain)
			mid = raw_input('Please input MySQL id:')

		for m in mysqls:
			if str(m.id) == mid:
				return [mid, m.login_name]
		# no found in this loop
		mid = -1
Example #9
File: plugin.py Project: msparks/pyhole
def reload_plugins(*args, **kwargs):
    """Module function that'll reload all of the plugins"""
    config = utils.load_config("Pyhole", kwargs.get("conf_file"))

    # When the modules are reloaded, the meta class will append
    # all of the classes again, so we need to make sure this is empty
    Plugin._plugin_classes = []
    _reset_variables()

    # Now reload all of the plugins
    plugins_to_reload = []
    plugindir = "pyhole.plugins"

    # Reload existing plugins
    for mod, val in sys.modules.items():
        if plugindir in mod and val and mod != plugindir:
            mod_file = val.__file__
            if not os.path.isfile(mod_file):
                continue
            for p in config.get("plugins", type="list"):
                if plugindir + "." + p == mod:
                    plugins_to_reload.append(mod)

    for p in plugins_to_reload:
        try:
            reload(sys.modules[p])
        except Exception, e:
            # Catch all because this could be many things
            kwargs.get("irc").log.error(e)
            pass
Example #10
File: mysql.py Project: Abriko/pyLAMP
def check_ftp_id(arg=None):
	import db
	config = utils.load_config()

	data = db.Connection(host='127.0.0.1',database='lamp',user=config['lampuser'],password=config['lamppass'])

	fid = -1
	ftps = data.query('SELECT f.id, f.site_id, f.username, f.local_root, s.domain FROM lamp__ftp AS f, lamp__sites AS s WHERE f.site_id = s.id ORDER BY f.id')

	if arg and arg[0].isdigit():
		fid = arg[0]


	while True:

		if fid == -1:
			print '\nID\tuser\t\tpath\t\tsite'
			for f in ftps:
				print '%s \t%s  \t%s  \t%s' % (f.id, f.username, f.local_root, f.domain)
			fid = raw_input('Please input FTP id:')

		for f in ftps:
			if str(f.id) == fid:
				return f
		fid = -1
Example #11
File: mysql.py Project: Abriko/pyLAMP
def delete_mysql(arg=None):
	config = utils.load_config()
	m = check_mysql_id(arg)
	logging.debug('get mysql info m: %s', m)

	if not utils.get_yseorno('Do you really want to delete mysql user: %s and drop databases that have the same names as the users?' % m[1]):
		sys.exit()

	import db
	config = json.load(open('/etc/lamp/config'))

	if utils.get_yseorno('Do you want to backup database?'):
		bk_name = backup_database(m[1], config)
		print 'Database backup to %s' % bk_name


	data = db.Connection(host='127.0.0.1',database='lamp',user=config['lampuser'],password=config['lamppass'])
	try:
		r_mysql = data.get('SELECT user, host FROM mysql.user WHERE user = \"%s\"' % m[1])

		logging.debug('del mysql %s, %s', m, r_mysql)
		if r_mysql:
			data.execute('DROP USER \"%s\"@\"%s\"' % (r_mysql.user, r_mysql.host))
			data.execute('DROP DATABASE %s' % m[1])

			data.execute('DELETE FROM lamp__mysql WHERE login_name = \"%s\"' % r_mysql.user)
		else:
			logging.info('Nothing to delete..')
	except Exception,e:
		logging.info('delete mysql and database has some errors : %s', str(e))
Example #12
    def verify(self, config_path, name, type):
        try:
            share = load_config(config_path, '{0}-{1}'.format(type, name))
        except FileNotFoundError:
            raise VerifyException(
                errno.ENOENT,
                'There is no share {0} of type {1} at {2} to be imported.'.format(name, type, config_path)
            )
        except ValueError:
            raise VerifyException(
                errno.EINVAL,
                'Cannot read configuration file. File is not a valid JSON file'
            )

        if share['type'] != type:
            raise VerifyException(
                errno.EINVAL,
                'Share type {0} does not match configuration file entry type {1}'.format(type, share['type'])
            )

        if not self.dispatcher.call_sync('share.supported_types').get(share['type']):
            raise VerifyException(errno.ENXIO, 'Unknown sharing type {0}'.format(share['type']))

        if self.datastore.exists(
            'shares',
            ('type', '=', share['type']),
            ('name', '=', share['name'])
        ):
            raise VerifyException(errno.EEXIST, 'Share {0} of type {1} already exists'.format(
                share['name'],
                share['type']
            ))

        return ['system']
Example #13
    def run(self, name, volume):
        try:
            container = load_config(
                self.dispatcher.call_sync(
                    'volume.resolve_path',
                    volume,
                    os.path.join('vm', name)
                ),
                'vm-{0}'.format(name)
            )
        except FileNotFoundError:
            raise TaskException(errno.ENOENT, 'There is no {0} on {1} volume to be imported.'. format(name, volume))
        except ValueError:
            raise VerifyException(
                errno.EINVAL,
                'Cannot read configuration file. File is not a valid JSON file'
            )

        id = self.datastore.insert('containers', container)
        self.dispatcher.dispatch_event('container.changed', {
            'operation': 'create',
            'ids': [id]
        })

        return id
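In Examples #12, #13 and #20 (below), load_config(directory, name) evidently parses a JSON document, since the callers catch FileNotFoundError and ValueError. A minimal sketch under that assumption; the on-disk file name '<name>.json' is a guess, not confirmed by the source:

import json
import os


def load_config(directory, name):
    # open() raises FileNotFoundError and json.load() raises ValueError
    # (JSONDecodeError) on malformed input, matching the callers above.
    with open(os.path.join(directory, '{0}.json'.format(name))) as f:
        return json.load(f)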
Example #14
File: mysql.py Project: Abriko/pyLAMP
def delete_site(domain):
	import db
	config = utils.load_config()

	data = db.Connection(host='127.0.0.1',database='lamp',user=config['lampuser'],password=config['lamppass'])

	if domain.isdigit():
		site = data.get('SELECT id, domain, site_root FROM lamp.lamp__sites WHERE id = %s' % domain)
	else:
		site = data.get('SELECT id, domain, site_root FROM lamp.lamp__sites WHERE domain = "%s"' % domain)

	if not site:
		logging.info('domain %s don\'t exists, Please pick one:', domain)
		site = check_site_id()

	logging.debug('delete site domain: %s', domain)

	# don't delete default site
	if site.id == 1:
		logging.warning('You can\'t delete default site')
		sys.exit(-1)

	try:
		data.execute('DELETE FROM lamp.lamp__sites WHERE domain = "%s"' % domain)

	except Exception,e:
		logging.info('delete site to mysql has some errors : %s', str(e))
		site = -0
Example #15
def file_tree(conf='basic', depcfg='data/deps1.cfg', output_files=('Output.js',)):
    #@@ add a dep->out map option
    tempdir, libdir = testutils.setup_temp_dir()
    merger = testutils.load_config(conf, libdir)
    handles = testutils.setup_dir(merger, prefix=libdir)
    depmap = deps.DepMap.from_resource(depcfg)
    files = [x for x in testutils.inject_deps(handles, libdir, depmap)]
    set_faux_files(merger, libdir, *output_files)
    return merger
Example #16
File: game.py Project: danigm/questgame
    def __init__(self, server='', player='p1', idx=0):
        pygame.init()
        pygame.font.init()

        self.em = EventManager()
        self.em.add_event(Event("game-event"))

        self.config = load_config(CONFIG_FILE)
        self.menu_options = self.config['MENU']

        self.idx = idx
        self.mode = "GAME"
        self.server = server
        self.player_name = player

        self.remote_game = None
        if self.server:
            self.remote_game = RemoteGame(self)

        self.window = pygame.display.set_mode((self.config['SCREENHEIGHT'], self.config['SCREENWIDTH']))
        pygame.display.set_caption(self.config['TITLE'])
        self.screen = pygame.display.get_surface()
        self.clock = pygame.time.Clock()

        self.chat_surface = pygame.Surface((self.screen.get_width(), 50))
        self.chat_surface.fill(pygame.Color(255, 255, 255))
        self.chat_surface.set_alpha(130)
        self.menu_surface = pygame.Surface((self.screen.get_width(), self.screen.get_height() // 2))
        self.menu_surface.fill(pygame.Color(255, 255, 255))
        self.menu_surface.set_alpha(130)
        self.text = ""
        self.font = pygame.font.SysFont("Sans", 16)

        self.map = Map(1, 1)
        self.map.load_from_image(self.config['MAP'])
        self.map.scroll = [0, 14]

        self.guy1 = Guy(self.idx, self)
        self.guy1.set_name(self.player_name)
        self.guy1.set_pos(17, 0)

        self.guy2 = RemoteGuy(1, self)
        self.guy2.set_name("Cat girl")
        self.guy2.movement = "circular"
        self.guy2.set_pos(15, 0)

        tree1 = Tree(self, 0)
        tree2 = Tree(self, 1)
        tree3 = Tree(self, 1)
        tree4 = Tree(self, 2)
        tree2.set_pos(20, 0)
        tree1.set_pos(20, 1)
        tree3.set_pos(19, 2)
        tree4.set_pos(18, 3)

        self.events = {}
Example #17
    def __init__(self, request):
        super(Metadata_View, self).__init__(request)
        self.check_login()
        self.templatePath = 'templates/metadata.pt'
        self.config = load_config(request)
        self.metadata = self.config['metadata']
        self.featured_links = []
        self.workspaces = \
            [(i['href'], i['title']) for i in self.session['collections']]

        self.role_mappings = {'authors': 'dcterms:creator',
                              'maintainers': 'oerdc:maintainer',
                              'copyright': 'dcterms:rightsHolder',
                              'editors': 'oerdc:editor',
                              'translators': 'oerdc:translator'}

        self.subjects = ["Arts",
                         "Business",
                         "Humanities",
                         "Mathematics and Statistics",
                         "Science and Technology",
                         "Social Sciences",
                         ]

        # The roles fields are comma-separated strings. This makes the javascript
        # easier on the client side, and is easy to parse. The fields are hidden,
        # and the values will be user ids, which should not have commas in them.
        self.field_list = [
            ['authors', 'authors', {'type': 'hidden'}],
            ['maintainers', 'maintainers', {'type': 'hidden'}],
            ['copyright', 'copyright', {'type': 'hidden'}],
            ['editors', 'editors', {'type': 'hidden'}],
            ['translators', 'translators', {'type': 'hidden'}],
            ['title', 'Title', {'type': 'text'}],
            ['summary', 'Summary', {'type': 'textarea'}],
            ['keywords', 'Keywords (One per line)', {'type': 'textarea'}],
            ['subject', 'Subject', {'type': 'checkbox',
                                    'values': self.subjects}],
            ['language', 'Language', {'type': 'select',
                                      'values': languages,
                                      'selected_value': 'en'}],
            ['google_code', 'Google Analytics Code', {'type': 'text'}],
            ['workspace', 'Workspace', {'type': 'select',
                                      'values': self.workspaces}],
        ]

        self.remember_fields = [field[0] for field in self.field_list[5:]]

        # Get remembered fields from the session
        self.defaults = {}

        # Get remembered title from the session    
        if 'title' in self.session:
            self.defaults['title'] = self.session['title']
            self.config['metadata']['title'] = self.session['title']
Example #18
File: mysql.py Project: Abriko/pyLAMP
def update_site_alias(site_id, alias):
	import db
	config = utils.load_config()

	data = db.Connection(host='127.0.0.1',database='lamp',user=config['lampuser'],password=config['lamppass'])
	alias = json.dumps(alias)
	try:
		data.execute('UPDATE lamp__sites SET alias=\'%s\' WHERE id=%s' % (alias, site_id))
	except Exception,e:
		logging.info('update site alias has some errors: %s', str(e))
		return False
Example #19
    def __init__(self, parent=None, config_file="src/demos/temporal/config.json"):
        super(HinselmannResults, self).__init__(parent)

        self.config = utils.load_config(config_file)
        self.width = 0
        self.height = 0
        self.width, self.height = wx.DisplaySize()
        self.parent = parent

        self.setLayout()
        self.addTooltips()
        self.bindControls()
Example #20
    def run(self, config_path, name, type):

        share = load_config(config_path, '{0}-{1}'.format(type, name))

        ids = self.join_subtasks(self.run_subtask('share.{0}.import'.format(share['type']), share))

        self.dispatcher.dispatch_event('share.changed', {
            'operation': 'create',
            'ids': ids
        })

        return ids[0]
Example #21
    def __init__(self, parent=None,
                 config_file="src/demos/temporal/config.json"):
        super(DiagnosisPanel, self).__init__(parent,)

        self.config = utils.load_config(config_file)
        self.width = 0
        self.height = 0
        self.width, self.height = wx.DisplaySize()

        self.setLayout()
        self.addTooltips()
        self.bindControls()
Example #22
File: mysql.py Project: Abriko/pyLAMP
def get_ftps_bysite(site_id):
	import db
	config = utils.load_config()

	data = db.Connection(host='127.0.0.1',database='lamp',user=config['lampuser'],password=config['lamppass'])

	ftps = data.query('SELECT f.id FROM lamp__ftp AS f WHERE f.site_id = %s' % site_id)

	logging.debug('delete ftps by site_id: %s, ftps: %s', site_id, ftps)

	del data
	return ftps
Example #23
File: mysql.py Project: Abriko/pyLAMP
def reset_mysql_pass(arg=None):
	m = check_mysql_id(arg)

	import db
	config = utils.load_config()
	data = db.Connection(host='127.0.0.1',database='lamp',user=config['lampuser'],password=config['lamppass'])
	user_pass = utils.gen_random_str()
	try:
		data.execute('UPDATE mysql.user SET password=PASSWORD(\'%s\') WHERE user=\'%s\'' % (user_pass, m[1]))
		data.execute('FLUSH PRIVILEGES')
	except Exception,e:
		logging.info('delete mysql and database has some errors : %s', str(e))
		sys.exit(1)
Example #24
def cnxlogin_view(request):
    check_login(request)

    config = load_config(request)
    login_url = config['login_url']

    templatePath = 'templates/cnxlogin.pt'
    response = {
        'username': request.session['login'].username,
        'password': request.session['login'].password,
        'login_url': login_url,
    }
    return render_to_response(templatePath, response, request=request)
Example #25
def send_text():
    """Sends the txt message from data passed through POST."""
                                
    if request.headers['Content-Type'] == 'application/json':
        #converts json to python dict
        data = request.json
        #get list of carriers from carriers.json                      
        config = load_config()
        carriers = load_carriers() 
        
        #authenticate request
        if data['api_key'] == config['api_key']:
            
            if data['carrier'] in carriers:
                #prepare the message
                carrier_choice = data['carrier']
                carrier = carriers[carrier_choice]
                number = data['number']            
                msg = data['msg']
                to =  "{0}{1}".format(number, carrier)
                sender = config['from']
                #sends the actual message
                mail = smtplib.SMTP(config['smtp_address'])
                mail.starttls()
                mail.login(config['username'], config['password'])
                mail.sendmail(sender, to, msg)
                mail.quit()
                #prepare the json response.
                log = "Message: '{0}' was sent succesfuly sent to '{1}'.".format(msg, to)
                logging(log)
                resp = {"response" : log}
                response = Response(json.dumps(resp), status=200, mimetype='application/json')
                return response
        
        #if the carrier is not supported or found in the carriers list.
        else: 
            log = "Carrier not supported."
            #log to web2txt.log file
            logging(log)
            resp = {"response" : log}
            response = Response(json.dumps(resp), status=404, mimetype='application/json')
            return response
    
    #if the content type is not json
    else:
        log = "Wrong request content-type. API only support JSON"
        #log to web2txt.log file
        logging(log)
        resp = {"response" : log}
        response = Response(json.dumps(resp), status=415, mimetype='application/json')
        return response 
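Example #25 indexes the return values of load_config() and load_carriers() like plain dicts (api_key, from, smtp_address, and so on). A sketch of such helpers, assuming JSON files named config.json and carriers.json; both file names are assumptions:

import json


def load_config(path='config.json'):
    # Application settings: api_key, from, smtp_address, username, password, ...
    with open(path) as f:
        return json.load(f)


def load_carriers(path='carriers.json'):
    # Mapping of carrier name -> email-to-SMS gateway suffix.
    with open(path) as f:
        return json.load(f)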
Example #26
File: plugin.py Project: msparks/pyhole
def load_plugins(*args, **kwargs):
    """Module function that loads plugins from a particular directory"""
    config = utils.load_config("Pyhole", kwargs.get("conf_file"))
    plugin_names = config.get("plugins", type="list")

    for p in plugin_names:
        load_user_plugin(p, *args, **kwargs)

        try:
            __import__("pyhole.plugins", globals(), locals(), [p])
        except Exception, e:
            # Catch all because this could be many things
            kwargs.get("irc").log.error(e)
            pass
Example #27
File: deploy.py Project: kereyroper/lfm
def deploy_dir(path, kwargs):
	with utils.directory(path):
		config = utils.load_config()
		config['config'].update(kwargs)
		if 'FunctionName' not in config['config']:
			clip.exit('You must provide a function name', err=True)
		# Remove ignore paths
		for e in config['ignore'] + ['.git/', '.gitignore']:
			utils.delete_resource(e)
		# Run install command
		if 'install' in config:
			utils.shell(config['install'])
		# Zip up directory
		utils.make_zip(config['config']['FunctionName'])
		# Upload!
		params = config['config']
		upload(params['FunctionName'], params)
Example #28
File: mysql.py Project: Abriko/pyLAMP
def create_site(domain, site_root):
	import db
	config = utils.load_config()

	data = db.Connection(host='127.0.0.1',database='lamp',user=config['lampuser'],password=config['lamppass'])

	site = data.get('select domain, site_root from lamp.lamp__sites where domain = "%s"' % domain)
	if site:
		logging.info('domain %s already exists, exiting...', domain)
		sys.exit(1)

	logging.debug('insert site domain: %s, site_root: %s', domain, site_root)
	try:
		site = data.insert('lamp__sites',domain=domain, site_root=site_root)
	except Exception,e:
		logging.info('insert site to mysql has some errors : %s', str(e))
		site = -0
Example #29
File: mysql.py Project: Abriko/pyLAMP
def get_ftps(arg=None):
	import db
	config = utils.load_config()
	data = db.Connection(host='127.0.0.1',database='lamp',user=config['lampuser'],password=config['lamppass'])

	if not arg:
		ftps = data.query('SELECT f.id, f.site_id, f.username, f.local_root, s.domain FROM lamp__ftp AS f, lamp__sites AS s WHERE f.site_id = s.id ORDER BY f.id')
	elif arg[0].isdigit():
		ftps = data.query('SELECT f.id, f.site_id, f.username, f.local_root, s.domain FROM lamp__ftp AS f, lamp__sites AS s WHERE f.site_id = s.id AND f.id = %s ORDER BY f.id' % arg[0])
	else:
		ftps = data.query('SELECT f.id, f.site_id, f.username, f.local_root, s.domain FROM lamp__ftp AS f, lamp__sites AS s WHERE f.site_id = s.id AND f.username LIKE \"%%{0}%%\" ORDER BY f.id'.format(arg[0]))

	print 'FTP(%i):' % len(ftps)
	if ftps:
		for f in ftps:
			print '-' * 50
			print '\nID\tuser\t\tpath\t\tsite'
			print '%s \t%s \t%s \t%s' % (f.id, f.username, f.local_root, f.domain)
Example #30
    def do_startup(self):
        Gtk.Application.do_startup(self)

        self.config = utils.load_config()

        add_account_action = Gio.SimpleAction.new("add_account", None)
        add_account_action.connect("activate", self.add_account_callback)
        self.add_action(add_account_action)

        new_action = Gio.SimpleAction.new("new", None)
        new_action.connect("activate", self.on_new)
        self.add_action(new_action)

        preview_action = Gio.SimpleAction.new("preview", None)
        preview_action.connect("activate", self.on_preview)
        self.add_action(preview_action)

        open_action = Gio.SimpleAction.new("open_draft", None)
        open_action.connect("activate", self.on_open_draft)
        self.add_action(open_action)

        save_action = Gio.SimpleAction.new("save_draft", None)
        save_action.connect("activate", self.on_save_draft)
        self.add_action(save_action)

        remove_current_blog_action = Gio.SimpleAction.new("remove_current_blog", None)
        remove_current_blog_action.connect("activate", self.on_remove_current_blog)
        self.add_action(remove_current_blog_action)

        quit_action = Gio.SimpleAction.new("quit", None)
        quit_action.connect("activate", self.on_quit)
        self.add_action(quit_action)

        for item in self.config["blogs"]:
            self.create_select_blog_action(item["id"])

        # Insert actions
        insert_image_action = Gio.SimpleAction.new("insert_image", None)
        insert_image_action.connect("activate", self.on_insert_image)
        self.add_action(insert_image_action)

        insert_code_action = Gio.SimpleAction.new("insert_code", None)
        insert_code_action.connect("activate", self.on_insert_code)
        self.add_action(insert_code_action)
Example #31
def get_hash(config_file, token):
    prompt_warning('File %s will be changed.' % config_file)
    config = load_config(config_file)
    github = Github(config['player'], token)
    return start_get_hash(config, github, config_file)
Example #32
 def __init__(self, file_=None, **kwargs):
     self.default = utils.load_config(self, file_)
     utils.set_mosdirs(self)
     self.config_file_name = file_
Example #33
def train():
    # load data sets
    train_sentences = load_sentences(FLAGS.train_file, FLAGS.lower,
                                     FLAGS.zeros)
    dev_sentences = load_sentences(FLAGS.dev_file, FLAGS.lower, FLAGS.zeros)
    test_sentences = load_sentences(FLAGS.test_file, FLAGS.lower, FLAGS.zeros)

    # Use selected tagging scheme (IOB / IOBES)
    # update_tag_scheme(train_sentences, FLAGS.tag_schema)
    # update_tag_scheme(test_sentences, FLAGS.tag_schema)
    # update_tag_scheme(dev_sentences, FLAGS.tag_schema)
    # create maps if not exist
    if not os.path.isfile(FLAGS.map_file):
        # create dictionary for word
        if FLAGS.pre_emb:
            dico_chars_train = char_mapping(train_sentences, FLAGS.lower)[0]
            dico_chars, char_to_id, id_to_char = augment_with_pretrained(
                dico_chars_train.copy(), FLAGS.emb_file,
                list(
                    itertools.chain.from_iterable([[w[0] for w in s]
                                                   for s in test_sentences])))
        else:
            _c, char_to_id, id_to_char = char_mapping(train_sentences,
                                                      FLAGS.lower)

        # Create a dictionary and a mapping for tags
        _t, tag_to_id, id_to_tag = tag_mapping(train_sentences, dev_sentences,
                                               test_sentences)
        #with open('maps.txt','w',encoding='utf8') as f1:
        #f1.writelines(str(char_to_id)+" "+id_to_char+" "+str(tag_to_id)+" "+id_to_tag+'\n')
        with open(FLAGS.map_file, "wb") as f:
            pickle.dump([char_to_id, id_to_char, tag_to_id, id_to_tag], f)
    else:
        with open(FLAGS.map_file, "rb") as f:
            char_to_id, id_to_char, tag_to_id, id_to_tag = pickle.load(f)

    # prepare data, get a collection of list containing index
    train_data = prepare_dataset(train_sentences, char_to_id, tag_to_id,
                                 FLAGS.lower)
    dev_data = prepare_dataset(dev_sentences, char_to_id, tag_to_id,
                               FLAGS.lower)
    test_data = prepare_dataset(test_sentences, char_to_id, tag_to_id,
                                FLAGS.lower)
    print("%i / %i / %i sentences in train / dev / test." %
          (len(train_data), len(dev_data), len(test_data)))

    train_manager = BatchManager(train_data, FLAGS.batch_size)
    dev_manager = BatchManager(dev_data, 10)
    test_manager = BatchManager(test_data, 10)
    # make path for store log and model if not exist
    make_path(FLAGS)
    if os.path.isfile(FLAGS.config_file):
        config = load_config(FLAGS.config_file)
    else:
        config = config_model(char_to_id, tag_to_id)
        save_config(config, FLAGS.config_file)
    make_path(FLAGS)

    log_path = os.path.join("log", FLAGS.log_file)
    logger = get_logger(log_path)
    print_config(config, logger)

    # limit GPU memory
    tf_config = tf.ConfigProto()
    # tf_config.gpu_options.allow_growth = True
    steps_per_epoch = train_manager.len_data
    with tf.Session(config=tf_config) as sess:
        model = create_model(sess, Model, FLAGS.ckpt_path, load_word2vec,
                             config, id_to_char, logger)
        logger.info("start training")
        loss = []
        with tf.device("/cpu:0"):
            for i in range(100):
                for batch in train_manager.iter_batch(shuffle=True):
                    step, batch_loss = model.run_step(sess, True, batch)
                    loss.append(batch_loss)
                    if step % FLAGS.steps_check == 0:
                        iteration = step // steps_per_epoch + 1
                        logger.info("iteration:{} step:{}/{}, "
                                    "NER loss:{:>9.6f}".format(
                                        iteration, step % steps_per_epoch,
                                        steps_per_epoch, np.mean(loss)))
                        loss = []

            # best = evaluate(sess, model, "dev", dev_manager, id_to_tag, logger)
                if i % 7 == 0:
                    save_model(sess, model, FLAGS.ckpt_path, logger)
Example #34
# Custom imports
sys.path.append('autoencoders/')
import EviAE
import ConvAE, SAE

##############################################################################
# Training mechanism of EviTRAM
##############################################################################

##########   Initialization of experiments properties, datasets etc.  ########

# Load configuration files
try:
    # Main autoencoder config file
    cp = utils.load_config(sys.argv[1])
except:
    print 'Help: ./train.py <path to main autoencoder ini file> <run number>'
    exit()

# Trying to reduce stochastic behaviours
SEED = cp.getint('Experiment', 'SEED')
tf.set_random_seed(SEED)
np.random.seed(SEED)

# Load dataset
inp_path = cp.get('Experiment', 'DATAINPUTPATH')
if inp_path == '':
    dataset = utils.load_mnist(
        val_size=cp.getint('Experiment', 'VALIDATIONSIZE'))
else:
Example #35
        frame_length {int} -- the number of samples in each analysis window
        overlap {float} -- in [0, 1) the fraction of overlap for each window

    returns: np.array with dimensions (nb_mfcc,t,n_windows)
    """

    mfcc = generate_MFCC(array, n_mfcc, frame_length, overlap, sampling_rate,
                         n_windows)
    mfcc_formatted = np.array(np.split(mfcc, 30, axis=1))
    return mfcc_formatted


if __name__ == "__main__":

    params = load_params()
    config = load_config()
    data = read_in_data(params["sampling_rate"],
                        sample_data=config["using_sample_data"])
    sample_array = data['test'][5][1]
    sample_array = np.array(sample_array.tolist() + list(range(10000)))

    x = generate_MFCC(sample_array,
                      n_mfcc=params["MFCC"]["n_mfcc"],
                      frame_length=params["MFCC"]["frame_length_in_s"],
                      overlap=params["MFCC"]["overlap"],
                      sampling_rate=params["sampling_rate"],
                      n_windows=params["MFCC"]["n_windows"])

    #Output format should be (40, 1500)
    print(x.shape)
Example #36
def train_new():
    train_sent = load_sentences(FLAGS.filepath)

    update_tag_scheme(train_sent, FLAGS.tag_schema)

    if not os.path.isfile(FLAGS.map_file):
        _c, char_to_id, id_to_char = char_mapping(train_sent, FLAGS.lower)
        print("random embedding")

        # Create a dictionary and a mapping for tags
        _t, tag_to_id, id_to_tag = tag_mapping(train_sent)
        with open(FLAGS.map_file, "wb") as f:
            pickle.dump([char_to_id, id_to_char, tag_to_id, id_to_tag], f)
    else:
        with open(FLAGS.map_file, "rb") as f:
            char_to_id, id_to_char, tag_to_id, id_to_tag = pickle.load(f)

    # Prepare the data: split it into a training set and a validation set
    np.random.seed(10)
    train_sent_ = np.array(train_sent)
    shuffle_indices = np.random.permutation(np.arange(len(train_sent)))

    sent_shuffled = train_sent_[shuffle_indices]
    dev_sample_index = -1 * int(FLAGS.dev_percentage * float(len(train_sent)))
    train_sent_new, dev_sent = sent_shuffled[:dev_sample_index], sent_shuffled[
        dev_sample_index:]

    train_data = prepare_dataset(train_sent_new, char_to_id, tag_to_id,
                                 FLAGS.lower)
    dev_data = prepare_dataset(dev_sent, char_to_id, tag_to_id, FLAGS.lower)

    print("%i / %i sentences in train." % (len(train_data), len(dev_data)))

    train_manager = BatchManager(train_data, FLAGS.batch_size)
    dev_manager = BatchManager(dev_data, 100)

    make_path(FLAGS)
    if os.path.isfile(FLAGS.config_file):
        config = load_config(FLAGS.config_file)
    else:
        config = config_model(char_to_id, tag_to_id)
        save_config(config, FLAGS.config_file)
    make_path(FLAGS)

    log_path = FLAGS.log_file
    logger = get_logger(log_path)
    print_config(config, logger)

    # Allow TensorFlow to allocate GPU memory on demand
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True
    steps_per_epoch = train_manager.len_data
    with tf.Session(config=tf_config) as sess:

        fig = plt.figure()
        ax = fig.add_subplot(211)
        ax2 = fig.add_subplot(212)
        plt.grid(True)
        plt.ion()

        model = create_model(sess, Model, FLAGS.ckpt_path, load_word2vec,
                             config, id_to_char, logger)
        logger.info("start training")
        loss = []
        for i in range(FLAGS.max_epoch):
            for batch in train_manager.iter_batch(shuffle=True):
                step, batch_loss = model.run_step(sess, True, batch)
                loss.append(batch_loss)

                if step % 20 == 0:
                    ax.scatter(step, np.mean(loss), c='b', marker='.')
                    plt.pause(0.001)

                if step % FLAGS.steps_check == 0:
                    iteration = step // steps_per_epoch + 1
                    logger.info("iteration:{} step:{}/{}, "
                                "NER loss:{:>9.6f}".format(
                                    iteration, step % steps_per_epoch,
                                    steps_per_epoch, np.mean(loss)))
                    loss = []
            best, f1 = evaluate(sess, model, "dev", dev_manager, id_to_tag,
                                logger)
            ax2.scatter(i + 1, f1, c='b', marker='.')
            plt.pause(0.001)
            if best:
                save_model(sess, model, FLAGS.ckpt_path, logger, "best")
Example #37
from model import teacher_model

# download dataset with flowers 5 different classes
dataset_url = "https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz"
data_dir = tf.keras.utils.get_file('flower_photos',
                                   origin=dataset_url,
                                   untar=True)
data_dir = pathlib.Path(data_dir)

# check if it was well downloaded
image_count = len(list(data_dir.glob('*/*.jpg')))
if image_count:
    print("Data loaded correctly")

# load config
config = load_config(
    str(pathlib.Path(__file__).parent.absolute()) + "/config.yaml")
if config:
    print("Config loaded correctly")

# get values from config
val_spl = config["teacher"]["train"]["val_split"]
im_h, im_w = config["teacher"]["train"]["im_h"], config["teacher"]["train"][
    "im_w"]
b_s = config["teacher"]["train"]["b_s"]
n_cl = config["teacher"]["train"]["n_classes"]
ep = config["teacher"]["train"]["epochs"]

# divide data on train and validation
# load train part
train_ds = tf.keras.preprocessing.image_dataset_from_directory(
    data_dir,
Example #38
def main():
    args = parser.parse_args()
    config = load_config(os.path.join(args.config_dir, "config.pkl"))
    is_monocular = False

    # add missing param to multiwarp stack run
    # if "multiwarp-5" in config.name:
    #     if "/stack" in config.name:
    #         config.cameras_stacked="input"

    # multiwarp-5
    if "multiwarp-5" in config.name or "multiwarp-outer" in config.name or "multiwarp-all" in config.name:
        mode = "multiwarp-5"
    elif "singlewarp" in config.name:
        mode = "singlewarp"
    else:
        raise ValueError("unknown mode")

    if "/epi" in config.name:
        enc = "epi"
    elif "/stack" in config.name:
        enc = "stack"
    elif "/focalstack-17-5" in config.name:
        enc = "focalstack-17-5"
    elif "/focalstack-17-9" in config.name:
        enc = "focalstack-17-9"
    elif "/monocular" in config.name:
        enc = "monocular"
        is_monocular = True
    else:
        raise ValueError("unknown encoding")

    if args.no_pose:
        print(
            "Considering zero ground truth pose. Only depth is predicted correctly in this case."
        )

    ## data on which the pipeline has to be run
    # config.data = "/home/dtejaswi/Documents/Projects/student_projects/joseph_daniel/data/module-1-1/module1-1-png"
    # config.data = "/home/dtejaswi/Desktop/joseph_daniel/extras/png/A/60"
    config.data = args.data_dir

    ## directory for storing output
    # config.save_path = os.path.join("/home/dtejaswi/Desktop/joseph_daniel/ral/", mode, enc)
    # output_dir = os.path.join(config.save_path, "results", args.seq)
    output_dir = os.path.join(args.config_dir, "results",
                              args.suffix + args.seq)

    if args.use_checkpoint_at is not None:
        config.dispnet = os.path.join(
            args.config_dir,
            "dispnet_" + args.use_checkpoint_at + "_checkpoint.pth.tar")
        output_dir = output_dir + "_epoch_" + args.use_checkpoint_at
    else:
        # load configuration from checkpoints
        if args.use_latest_not_best:
            config.dispnet = os.path.join(args.config_dir,
                                          "dispnet_checkpoint.pth.tar")
            output_dir = output_dir + "-latest"
        else:
            config.dispnet = os.path.join(args.config_dir,
                                          "dispnet_best.pth.tar")

    os.makedirs(output_dir)
    os.makedirs(output_dir + "/disp")

    # define transformations
    transform = custom_transforms.Compose([
        custom_transforms.ArrayToTensor(),
        custom_transforms.Normalize(mean=0.5, std=0.5)
    ])

    # Load validation dataset loaders
    if config.lfformat == 'focalstack':
        dataset = get_validation_focal_stack_loader(config, args.seq,
                                                    transform, args.no_pose)
        print("Loading images as focalstack")
    elif config.lfformat == 'stack':
        dataset = get_validation_stacked_lf_loader(config, args.seq, transform,
                                                   args.no_pose, is_monocular)
        print("Loading images as stack")
    elif config.lfformat == 'epi':
        dataset = get_validation_epi_loader(config, args.seq, transform,
                                            args.no_pose)
        print("Loading images as tiled EPIs")
    else:
        raise TypeError(
            "Unknown light field image format. Should be either focalstack, stack or epi"
        )

    output_channels = len(config.cameras)

    if config.lfformat == 'epi':
        if config.cameras_epi == "vertical":
            disp_encoder = EpiEncoder('vertical', config.tilesize).to(device)
            dispnet_input_channels = 16 + len(
                config.cameras
            )  # 16 is the number of output channels of the encoder
        elif config.cameras_epi == "horizontal":
            disp_encoder = EpiEncoder('horizontal', config.tilesize).to(device)
            dispnet_input_channels = 16 + len(
                config.cameras
            )  # 16 is the number of output channels of the encoder
        elif config.cameras_epi == "full":
            disp_encoder = EpiEncoder('full', config.tilesize).to(device)
            if config.without_disp_stack:
                dispnet_input_channels = 32  # 16 is the number of output channels of each encoder
            else:
                dispnet_input_channels = 32 + 5  # 16 is the number of output channels of each encoder, 5 from stack
        else:
            raise ValueError("Incorrect cameras epi format")
        print("Initialised disp and pose encoders")
    else:
        disp_encoder = None
        dispnet_input_channels = dataset[0]['tgt_lf_formatted'].shape[0]

    print(
        f"[DispNet] Using {dispnet_input_channels} input channels, {output_channels} output channels"
    )

    # Load disp net
    disp_net = DispNetS(in_channels=dispnet_input_channels,
                        out_channels=output_channels,
                        encoder=disp_encoder).to(device)
    weights = torch.load(config.dispnet)
    disp_net.load_state_dict(weights['state_dict'])
    disp_net.eval()
    print("Loaded only dispnet")

    # prediction
    for i, validData in enumerate(dataset):
        print("{:03d}/{:03d}".format(i + 1, len(dataset)), end="\r")

        tgt = validData['tgt_lf'].unsqueeze(0).to(device)
        ref = [r.unsqueeze(0).to(device) for r in validData['ref_lfs']]

        if config.lfformat == "epi" and config.cameras_epi == "full":
            tgt_lf_formatted_h = validData['tgt_lf_formatted_h'].unsqueeze(
                0).to(device)
            tgt_lf_formatted_v = validData['tgt_lf_formatted_v'].unsqueeze(
                0).to(device)
            tgt_stack = validData['tgt_stack'].unsqueeze(0).to(device)
            # Encode the epi images further
            if config.without_disp_stack:
                # Stacked images should not be concatenated with the encoded EPI images
                tgt_encoded_d = disp_net.encode(tgt_lf_formatted_v, None,
                                                tgt_lf_formatted_h)
            else:
                # Stacked images should be concatenated with the encoded EPI images
                tgt_encoded_d = disp_net.encode(tgt_lf_formatted_v, tgt_stack,
                                                tgt_lf_formatted_h)
        else:
            tgt_formatted = validData['tgt_lf_formatted'].unsqueeze(0).to(
                device)
            if disp_net.has_encoder():
                # This will only be called for epi with horizontal or vertical only encoding
                if config.without_disp_stack:
                    # Stacked images should not be concatenated with the encoded EPI images
                    tgt_encoded_d = disp_net.encode(tgt_formatted, None)
                else:
                    # Stacked images should be concatenated with the encoded EPI images
                    # NOTE: Here we stack all 17 images, not 5. Here the images missing from the encoding,
                    # are covered in the stack. We are not using this case in the paper at all.
                    tgt_encoded_d = disp_net.encode(tgt_formatted, tgt)
            else:
                # This will be called for focal stack and stack, where there is no encoding
                tgt_encoded_d = tgt_formatted

        disp = disp_net(tgt_encoded_d)
        # print(output.shape)
        # print(pose.shape)
        # print(tgt.shape)
        # print(ref[0].shape)

        outfile = os.path.join(output_dir, "{:06d}.png".format(i))
        plt.imsave(outfile, tgt.cpu().numpy()[0, 0, :, :], cmap='gray')
        outfile = os.path.join(output_dir, "disp/{:06d}.png".format(i))
        plt.imsave(outfile, disp.cpu().numpy()[0, 0, :, :])
        outfile = os.path.join(output_dir, "disp/{:06d}.npy".format(i))
        np.save(outfile, disp.cpu().numpy()[0, 0, :, :])
    print("\nok")
    parser.add_argument("--train", action="store_true")
    parser.add_argument("--map_size", type=int, default=125)
    parser.add_argument("--greedy", action="store_true")
    parser.add_argument("--name", type=str, default="trusty-battle")
    parser.add_argument("--eval", action="store_true")
    parser.add_argument('--alg',
                        default='dqn',
                        choices=['dqn', 'drqn', 'a2c', 'ppo'])
    parser.add_argument("--savedir", default='data/battle_model_3_players')
    args = parser.parse_args()

    # set logger
    buffer.init_logger(args.name)

    # init the game
    env = magent.GridWorld(utils.load_config(args.map_size))
    # env.set_render_dir("build/render")

    # two groups of agents
    handles = env.get_handles()

    # sample eval observation set
    eval_obs = [None, None, None]
    if args.eval:
        print("sample eval set...")
        env.reset()
        utils.generate_map(env, args.map_size, handles)
        for i in range(len(handles)):
            eval_obs[i] = buffer.sample_observation(env, handles, 2048, 500)

    # load models
Example #40
            use_mpi = bool(sys.argv[2])
            print("use_mpi:", use_mpi)
    else:
        print("Usage: analyse_sims.py config_file [use_mpi]")
        sys.exit(1)

    # Begin MPI
    if use_mpi:
        comm = MPI.COMM_WORLD
        myid = comm.Get_rank()
    else:
        comm = None
        myid = 0

    # Load config file
    cfg = utils.load_config(config_file, default_cfg())
    cfg_spec = cfg['sim_spec']
    cfg_diffuse = cfg['sim_diffuse']
    cfg_out = cfg['sim_output']
    cfg_beam = cfg['sim_beam']
    cfg_gain = cfg['sim_gain']
    cfg_noise = cfg['sim_noise']

    # Construct array layout to simulate
    if cfg_spec['use_legacy_array']:
        # This is the deprecated legacy function
        ants = utils.build_array()
    else:
        ants = utils.build_hex_array(hex_spec=cfg_spec['hex_spec'],
                                     ants_per_row=cfg_spec['hex_ants_per_row'],
                                     d=cfg_spec['hex_ant_sep'])
Example #41
        if self.return_sequence:
            return output
        else:
            return output[:, -1]


if __name__ == '__main__':

    from utils import load_config
    from loss import DiceLoss
    from tqdm import tqdm
    import torch.optim as optim

    config_file = 'config.yaml'
    config = load_config(config_file)
    input_modalites = int(config['PARAMETERS']['input_modalites'])
    output_channels = int(config['PARAMETERS']['output_channels'])
    base_channel = 4
    device = 'cuda:0' if torch.cuda.is_available() else 'cpu'

    def count_params(model):
        ''' print number of trainable parameters and its size of the model'''

        num_of_param = sum(p.numel() for p in model.parameters()
                           if p.requires_grad)
        print('Model {} : params number {}, params size: {:4f}M'.format(
            model._get_name(), num_of_param, num_of_param * 4 / 1000 / 1000))

    model = CenterLSTM(input_modalites,
                       output_channels,
Example #42
File: client.py Project: Cloudxtreme/DFS-1
    with closing(HTTPConnection(host, port)) as con:
        con.request('DELETE', filepath + '?lock_id=%s' % lock_id)

        status = con.getresponse().status

        if status != 200:
            raise DFSIOError('Error (%d) while deleting %s.' %
                             (status, filepath))


def rename(filepath, newfilepath):
    """Rename filepath to newfilepath."""

    with open(filepath) as f:
        with open(newfilepath, 'w') as nf:
            nf.write(f.read())

        unlink(filepath, f.lock_id)


open = File

_config = {
    'nameserver': None,
    'lockserver': None,
    'max_size': 1024**2,
}  # default
File._cache = {}
utils.load_config(_config, 'client.dfs.json')
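Example #42 ends with utils.load_config(_config, 'client.dfs.json'), which appears to overlay values from a JSON file onto the module-level defaults dict. A sketch under that assumption, keeping the defaults when no config file exists:

import json


def load_config(config, filename):
    # Update the defaults dict in place from a JSON file, if present.
    try:
        with open(filename) as f:
            config.update(json.load(f))
    except IOError:
        pass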
Example #43
            json.dump(meta_data, json_file)

        # Creating .mcpr zipfile based on timestamp
        print('Creating .mcpr file...')
        date = datetime.datetime.today().strftime('SARC_%Y%m%d_%H_%S')
        zipf = zipfile.ZipFile(date + '.mcpr', 'w', zipfile.ZIP_DEFLATED)
        zipf.write('metaData.json')
        zipf.write('recording.tmcpr')
        os.remove('metaData.json')
        os.remove('recording.tmcpr')
        print('Finished!')
    connection.close()
    return should_restart


config, email, password = utils.load_config()
debug = config['debug_mode']
address = (config['ip'], int(config['port']))
while True:
    try:
        if not run(config, email, password, debug, address):
            break
        else:
            print('Reconnecting...')
    except Exception as e:
        if debug:
            print('Connection lost: ' + traceback.format_exc())
        else:
            print('Connection lost: ' + str(e))
        if not config['auto_relog']:
            break
Example #44
def test():
    config = load_config(FLAGS.config_file)
    logger = get_logger(FLAGS.log_file)

    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True

    with open(FLAGS.map_file, "rb") as f:
        char_to_id, id_to_char, tag_to_id, id_to_tag = pickle.load(f)

    with tf.Session(config=tf_config) as sess:
        model = create_model(sess, Model, FLAGS.ckpt_path, load_word2vec,
                             config, id_to_char, logger)

        files = os.listdir(FLAGS.test_b_filepath)

        temp_dir = FLAGS.ner_result + '/submit_' + datetime.datetime.now(
        ).strftime('%Y%m%d_%H%M%S')
        print(temp_dir)
        os.makedirs(temp_dir)

        for f1 in files:
            f_name = f1.split(".")[0]
            with codecs.open(os.path.join(FLAGS.test_b_filepath, f1), "r",
                             "utf-8") as f2:
                s = f2.read()
                line = []
                sent = ''
                leng = 0  # cumulative character offset of the sentence currently being processed
                for i in range(len(s)):
                    if s[i] != '。':
                        sent += s[i]
                    else:
                        sent += s[i]
                        line.append(sent)
                        sent = ''

                f3 = codecs.open(os.path.join(temp_dir, f_name + ".ann"), "w",
                                 "utf-8")
                print(f3.name)
                i = 0
                for info in line:
                    result = model.evaluate_line(
                        sess, input_from_line(info, char_to_id), id_to_tag)
                    tag = result['entities']
                    # print(tag[0])

                    for char in tag:
                        sent = "T" + str(i + 1) + "\t" + char['type'] + " "
                        if char['word'].find("\n") == 0 or char['word'].find(
                                " ") == 0:
                            char_start = char['word'][0]
                            start = char['start'] + 1 + leng
                            word_new = char['word'].replace(char_start, "")
                            if char['word'].endswith(
                                    "\n") or char['word'].endswith(" "):
                                char_end = char['word'][-1]
                                end = char['end'] - 1 + leng
                                sent = sent + str(start) + " " + str(
                                    end) + "\t" + word_new.replace(
                                        char_end, "")
                            elif 0 < char['word'].find("\n") < len(
                                    char['word']):
                                j = char['word'].find("\n")
                                sent = sent + str(start) + " " + str(
                                    char['start'] + leng + j) + ";" + str(
                                        char['start'] + leng + j +
                                        1) + " " + str(
                                            char['end'] +
                                            leng) + "\t" + word_new.replace(
                                                "\n", " ")
                            else:
                                sent = sent + str(start) + " " + str(
                                    char['end'] + leng) + "\t" + word_new
                        else:
                            start = char['start'] + leng
                            if char['word'].endswith(
                                    "\n") or char['word'].endswith(" "):
                                char_end = char['word'][-1]
                                end = char['end'] - 1 + leng
                                sent = sent + str(start) + " " + str(
                                    end) + "\t" + char['word'].replace(
                                        char_end, "")
                            elif 0 < char['word'].find("\n") < len(
                                    char['word']):
                                j = char['word'].find("\n")
                                sent = sent + str(start) + " " + str(
                                    char['start'] + leng + j) + ";" + str(
                                        char['start'] + leng + j +
                                        1) + " " + str(char['end'] +
                                                       leng) + "\t" + char[
                                                           'word'].replace(
                                                               "\n", " ")
                            else:
                                sent = sent + str(start) + " " + str(
                                    char['end'] + leng) + "\t" + char['word']

                        f3.write(sent + '\n')
                        i += 1
                    leng += len(info)
                f3.close()
Example #45
def get_corpus(corpus_config_path):
    return KGCVAECorpus(config=utils.load_config(corpus_config_path))
Example #46
0
    def test_load_config(self):
        expected_sp3url = "https://cats.oxfordfun.com"
        result = utils.load_config("config.json")
        self.assertEqual(expected_sp3url, result['sp3_url'])
Example #47
0
import argparse
import os

import mesh_utils
import utils

parser = argparse.ArgumentParser(
    description='Create point clouds from ShapeNet objs.')
parser.add_argument('config', type=str, help='Path to config file.')
parser.add_argument('-v',
                    '--verbose',
                    dest='verbose',
                    action='store_true',
                    help='Be verbose.')
parser.set_defaults(verbose=False)
args = parser.parse_args()

# Read config.
cfg = utils.load_config(args.config)

shapenet_dir = cfg['data']['shapenet_dir']
out_dir = cfg['data']['out_dir']
meshes = cfg['data']['meshes']
num_points = cfg['data']['num_points']
bound = cfg['data']['bound']
num_train_transforms = cfg['data']['num_train_transforms']
num_test_transforms = cfg['data']['num_test_transforms']
rotation_max = cfg['data']['rotation_max']
translation_max = cfg['data']['translation_max']

# Setup out.
if not os.path.exists(out_dir):
    os.mkdir(out_dir)
Example #48
0
def single_agent():
    config = load_config()
    # num_agents = config['num_agents']
    torch.set_num_threads(1)

    env = GymEnv(config=config)
    env.reset()

    net = ActorCritic(True, config)
    net.ActorNetwork.init_params()
    net.CriticNetwork.init_params()

    bwe = config['sending_rate'][config['default_bwe']]

    i = 1
    s_batch = []
    r_batch = []
    a_batch = []

    # experience RTC if not forced to stop
    ax = []
    ay = []
    plt.ion()
    while True:
        # todo: Agent interact with gym
        state, reward, done, _ = env.step(bwe)

        r_batch.append(reward)

        action = net.predict(state)
        bwe = config['sending_rate'][action]
        a_batch.append(action)
        s_batch.append(state)

        # todo: need to be fixed
        if done:
            action = config['default_bwe']
            bwe = config['sending_rate'][action]
            # update network
            net.getNetworkGradient(s_batch, a_batch, r_batch, done)
            net.updateNetwork()
            print('Network updated.')

            i += 1
            ax.append(i)
            # ay.append(entropy)
            ay.append(reward)
            plt.clf()
            plt.plot(ax, ay)
            plt.pause(0.1)
            # s_batch.append(np.zeros(config['state_dim'], config['state_length']))
            # a_batch.append(action)
            env.reset()
            print('Environment has been reset.')
            print('Epoch {}, Reward: {}'.format(i - 1, reward))
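        # Note: `i` only advances when an episode ends, so this periodic save
        # can fire on every step while `i` stays at a multiple of 100.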
        if i % 100 == 0:
            # print('Current BWE: ' + str(bwe))
            torch.save(net.ActorNetwork.state_dict(),
                       config['model_dir'] + '/actor1_{}.pt'.format(str(i)))
            torch.save(net.CriticNetwork.state_dict(),
                       config['model_dir'] + '/critic13m_{}.pt'.format(str(i)))
            print('Model saved.')
Example #49
0
def evitram():
    # Restore pretrained model
    restorestr = pxfinestr.split('.meta')[0]

    # Save model str
    evitramstr = evitramfinestr.split('.meta')[0]

    # Load pretrained evidence representations for all sources
    K = []
    for e in sys.argv[3:]:
        cp2 = utils.load_config(e)
        K.append(cp2.get('Experiment', 'PX_Z_TRAIN'))

    sect = 'Experiment'
    ev_paths = [cp.get(sect, i) for i in cp.options(sect) if 'evidence' in i]

    if cp.get('Experiment', 'PREFIX') == 'MNIST':
        evitram_dict = ConvAE.build_EviTRAM(cp, SEED)
    else:
        # Layerwise autoencoder number
        ae_ids = [str(i) for i in xrange(cp.getint('Experiment', 'AENUM'))]
        evitram_dict = SAE.build_EviTRAM(cp, ae_ids, SEED)

    # Get variables to restore from pretrained model P(x) Encoder
    var_list = tf.trainable_variables()

    for ev_path_id, ev_path in enumerate(ev_paths):
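        # Exclude this evidence path's Pre_Q (and Pre_Comp_Q) kernel/bias variables
        # from the restore list so they are freshly initialized instead of being
        # restored from the pretrained P(x) encoder checkpoint.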
        if cp.get('Experiment', 'PREFIX') == 'MNIST':
            # Prepare "restore" variable list
            for v in var_list:
                if v.name == 'Pre_Q' + str(ev_path_id) + '/kernel:0':
                    var_list.remove(v)
            for v in var_list:
                if v.name == 'Pre_Q' + str(ev_path_id) + '/bias:0':
                    var_list.remove(v)
        else:
            # Prepare "restore" variable list
            for v in var_list:
                if v.name == 'Pre_Q' + str(ev_path_id) + '/kernel:0':
                    var_list.remove(v)
            for v in var_list:
                if v.name == 'Pre_Q' + str(ev_path_id) + '/bias:0':
                    var_list.remove(v)
            for v in var_list:
                if v.name == 'Pre_Comp_Q' + str(ev_path_id) + '/kernel:0':
                    var_list.remove(v)
            for v in var_list:
                if v.name == 'Pre_Comp_Q' + str(ev_path_id) + '/bias:0':
                    var_list.remove(v)

    ##########################################################
    # Tensorboard (comment / uncomment)
    ##########################################################

    from datetime import datetime

    now = datetime.utcnow().strftime("%m-%d_%H-%M:%S")
    root_logdir = cp.get('Experiment', 'ModelOutputPath')
    logdir = "{}/{}{}-{}/".format(
        root_logdir,
        cp.get('Experiment', 'PREFIX') + '_' +
        cp.get('Experiment', 'Enumber') + '_cond', sys.argv[2], now)
    tf.summary.scalar(name='cond loss', tensor=evitram_dict['evitram_loss'])
    tf.summary.scalar(name='recon loss', tensor=evitram_dict['px_mse'])
    summary = tf.summary.merge_all()
    file_writer = tf.summary.FileWriter(logdir, tf.get_default_graph())

    ##########################################################

    # Initialize & restore P(x) AE weights
    init = tf.global_variables_initializer()
    saver = tf.train.Saver(var_list)
    saverCOND = tf.train.Saver()

    # Task outcomes
    EV = [np.load(i) for i in K]

    # Start Session
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        # Init values
        init.run()
        # Restore finetuned model
        saver.restore(sess, restorestr)

        train_dict = {
            'cp': cp,
            'sess': sess,
            'data': XX_full,
            'sumr': summary,
            'savestr': evitramstr,
            'saver': saverCOND,
            'fw': file_writer,
            'EV': EV,
            'ev_paths': ev_paths
        }

        if cp.get('Experiment', 'PREFIX') == 'MNIST':
            ConvAE.evitram_train(train_dict, evitram_dict)
        else:
            SAE.evitram_train(train_dict, evitram_dict)

        # Get batch size for batch output save
        batch_size = train_dict['cp'].getint('Hyperparameters', 'BatchSize')

        if cp.get('Experiment', 'PREFIX') == 'MNIST':
            # Save hidden/output layer results for pipeline training
            px_Z_latent = utils.run_OOM(sess,
                                        evitram_dict['conv_in'],
                                        XX_full,
                                        evitram_dict['conv_z'],
                                        batch_size=batch_size)
        else:
            px_Z_latent = utils.run_OOM(sess,
                                        evitram_dict['sda_in'],
                                        XX_full,
                                        evitram_dict['sda_hidden'],
                                        batch_size=batch_size)
        #  utils.save_OOM(sess, pae_dict['conv_in'], XX_full,
        #  pae_dict['conv_out'],
        #  path=cp.get('Experiment', 'PX_XREC_TRAIN'),
        #  batch_size=batch_size)

    # Print clustering ACC
    utils.log_accuracy(cp, YY_full, px_Z_latent, 'COND - ACC FULL', SEED)

    # Print clustering NMI
    utils.log_NMI(cp, YY_full, px_Z_latent, 'COND - NMI FULL', SEED)

    sess.close()
Example #50
0
        'Trajectory type to use when rendering the headings. (Default: gt). If not gt, the trajectory file is taken as <args.out_dir>/<data_name>_<use_trajectory_type>.npy with files generated in ronin_lstm_tcn.py or ronin_resnet.py'
    )
    '''
    Extra arguments
    Set True: use_scheduler, quiet (no output on stdout)
              force_lr (force lr when a model is loaded from continue_from),
              heading_norm (normalize heading),
              separate_loss (report loss separately for logging)
    float: dropout, max_ori_error (err. threshold for priority grv in degrees)
           max_velocity_norm (filter outliers in training)
           weights (array of float values) 
    '''
    args, unknown_args = parser.parse_known_args()
    np.set_printoptions(formatter={'all': lambda x: '{:.6f}'.format(x)})

    args, kwargs = load_config(default_config_file, args, unknown_args)
    if args.mode == "train" and kwargs.get('weights') and type(
            kwargs.get('weights')) != list:
        kwargs['weights'] = [
            float(i) for i in kwargs.get('weights').split(',')
        ]

    print(args, kwargs)
    if args.mode == 'train':
        train(args, **kwargs)
    elif args.mode == 'test':
        if not args.model_path:
            raise ValueError("Model path required")
        args.batch_size = 1
        test(args, **kwargs)
Example #51
0
def train():
    # load data sets
    datasets = load_sentences(FLAGS.train_file, FLAGS.lower)
    random.shuffle(datasets)
    train_sentences = datasets[:14000]
    test_sentences = datasets[14000:]

    # Use selected tagging scheme (IOB / IOBES)
    update_tag_scheme(train_sentences, FLAGS.tag_schema)
    update_tag_scheme(test_sentences, FLAGS.tag_schema)

    # create maps if not exist
    if not os.path.isfile(FLAGS.map_file):
        # create dictionary for word
        char_to_id, _ = elmo_char_mapping(FLAGS.elmo_vocab)

        # Create a dictionary and a mapping for tags
        _t, tag_to_id, id_to_tag = tag_mapping(train_sentences)
        with open(FLAGS.map_file, "wb") as f:
            pickle.dump([char_to_id, tag_to_id, id_to_tag], f)
    else:
        with open(FLAGS.map_file, "rb") as f:
            char_to_id, tag_to_id, id_to_tag = pickle.load(f)

    # prepare data, get a collection of list containing index
    train_data = prepare_dataset(train_sentences, char_to_id, tag_to_id,
                                 FLAGS.lower)
    test_data = prepare_dataset(test_sentences, char_to_id, tag_to_id,
                                FLAGS.lower)
    print("%i / %i sentences in train / dev." %
          (len(train_data), len(test_data)))

    elmo_batcher = get_batcher()
    train_manager = BatchManager(train_data, FLAGS.batch_size, elmo_batcher)
    test_manager = BatchManager(test_data, FLAGS.batch_size, elmo_batcher)
    # make path for store log and model if not exist
    make_path(FLAGS)
    if os.path.isfile(FLAGS.config_file):
        config = load_config(FLAGS.config_file)
    else:
        config = config_model(tag_to_id)
        save_config(config, FLAGS.config_file)
    make_path(FLAGS)

    log_path = os.path.join("log", FLAGS.log_file)
    logger = get_logger(log_path)
    print_config(config, logger)

    # limit GPU memory
    tf_config = tf.ConfigProto(allow_soft_placement=True)
    tf_config.gpu_options.allow_growth = True
    steps_per_epoch = train_manager.len_data
    with tf.Session(config=tf_config) as sess:
        elmo_model = load_elmo()
        model = create_model(sess, Model, FLAGS.ckpt_path, elmo_model, config,
                             logger)
        logger.info("start training")
        loss = []
        for i in range(FLAGS.max_epoch):
            for batch in train_manager.iter_batch(shuffle=True):
                step, batch_loss = model.run_step(sess, True, batch)
                loss.append(batch_loss)
                if step % FLAGS.steps_check == 0:
                    iteration = step // steps_per_epoch + 1
                    logger.info(
                        "iteration:{} step:{}/{}, NER loss:{:>9.6f}".format(
                            iteration, step % steps_per_epoch, steps_per_epoch,
                            np.mean(loss)))
                    loss = []

            best = evaluate(sess, model, "test", test_manager, id_to_tag,
                            logger)
            # evaluate(sess, model, "dev", dev_manager, id_to_tag, logger)
            if best:
                save_model(sess, model, FLAGS.ckpt_path, logger)
Example #52
0
        self.detail_len = 0
        self.piece1_len = 0

        self.step = 0
        self.recv_message()


class appServer(TCPServer):
    def __init__(self, mysql_config):
        TCPServer.__init__(self)
        self.__linkdb_logic = linkdbLogic.linkdbLogic(mysql_config)

    def handle_stream(self, stream, address):
        self.__linkdb_logic.mysql_ping()
        Connection(stream, address, self.__linkdb_logic)


if __name__ == '__main__':
    print "%s link_db_updater Server start ......" % (time.asctime(), )
    logger = logging.getLogger()
    hdlr = logging.FileHandler('./log/link_db_update.log')
    formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
    hdlr.setFormatter(formatter)
    logger.addHandler(hdlr)
    logger.setLevel(logging.INFO)
    # load the config
    my_config = utils.load_config(sys.argv[1])
    server = appServer(my_config['mysql_config'])
    server.listen(my_config['server_config']['port'])
    IOLoop.instance().start()
Example #53
0
from confluent_kafka import Producer
import datetime
import os
import pathlib
import requests
import subprocess
import time

from alert_broker_ztf import watchdog
from utils import init_db_sync, load_config, log, Mongo


""" load config and secrets """
config = load_config(config_file="config.yaml")["kowalski"]


class Program:
    def __init__(self, group_name="FRITZ_TEST", group_nickname="Fritz"):
        self.access_token = config["skyportal"]["token"]
        self.base_url = (
            f"{config['skyportal']['protocol']}://"
            f"{config['skyportal']['host']}:{config['skyportal']['port']}"
        )
        self.headers = {"Authorization": f"token {self.access_token}"}

        self.group_name = group_name
        self.group_nickname = group_nickname
        self.group_id, self.filter_id = self.create()

    def get_groups(self):
        resp = requests.get(
Example #54
0
import numpy as np
from gensim.models import Doc2Vec
from utils import load_config, load_lab_seq_sp, embedding_to_file, labels_to_file

config = load_config("config.json")

dim = config["embedding_dimensions"]
model_sym_path = config["model_sym_path"]
model_SP_path = config["model_sp_path"]
training_data_file = config["training_data"]
embeddings_data_path = config["trained_embeddings_path"]
labels_data_path = config["labels"]

model_sym = Doc2Vec.load(model_sym_path)
model_SP = Doc2Vec.load(model_SP_path)

input_trajectories, input_sp, labels = load_lab_seq_sp(training_data_file)

labels_dict = dict.fromkeys(labels)
num_trajs = len(list(labels_dict))
print("Found {} unique user trajectories".format(num_trajs))

sum_vector = np.zeros(dim, dtype=np.float64)
index = 0
export_labels = []
total_labels = len(labels)

for label in labels:
    if index % 500 == 0:
        print("Evaluating traj {}/{} of user {}".format(index, total_labels, label))
Example #55
0
# -*- coding:utf-8 -*-
import tensorflow as tf
import os
from collections import OrderedDict

from init_data import DataInitializer
from dataset import DataManager
from utils import create_model, init_logger, add_log, save_model, clean_model, load_config, ensure_dir
from model import Model
from engine import Engine

config = load_config('data/origin_data/config.json')

flags = tf.app.flags
flags.DEFINE_boolean('clean', True, 'Clean train folder')
flags.DEFINE_boolean('is_train', True, 'Whether train the model or evaluate')
flags.DEFINE_boolean('preprocess', False, 'Preprocess the data')

flags.DEFINE_integer('embedding_dim', config['embedding_dim'],
                     'Embedding size of words')
flags.DEFINE_integer('vocabulary_size', config['vocabulary_size'],
                     'Vocabulary size of corpus')
flags.DEFINE_float('dropout_keep', 0.5, 'Dropout rate')
flags.DEFINE_integer('batch_size', 160, 'Batch size')
flags.DEFINE_float('lr', 0.5, 'Learning rate')
flags.DEFINE_string('optimizer', 'sgd', 'Optimizer for training')
flags.DEFINE_integer('num_epochs', 30, 'Maximum training epochs')
flags.DEFINE_integer('steps_check', 5, 'Steps per checkpoint')
flags.DEFINE_integer('save_epoch', 2, 'Save model after n epochs')
flags.DEFINE_string(
    'epoch_range',
Example #56
0
data_list = [
    ('vaccine_year', './domain_weights/vaccine_year',
     './split_data_idx/vaccine_year/'),
    ('economy_year', './domain_weights/economy_year',
     './split_data_idx/economy_year/'),
    ('yelp_rest_year', './domain_weights/yelp_rest_year',
     './split_data_idx/yelp_rest_year/'),
    ('yelp_hotel_year', './domain_weights/yelp_hotel_year',
     './split_data_idx/yelp_hotel_year/'),
    ('amazon_year', './domain_weights/amazon_year',
     './split_data_idx/amazon_year/'),
    ('dianping_year', './domain_weights/dianping_year',
     './split_data_idx/dianping_year/'),
]

# load the configurations
config = utils.load_config('./config.ini')
"""
Loop through each dataset
"""
for datap in data_list:
    print('Working on: -----------------' + datap[0])
    valid_result = open('./results/hawkes/results_rmsprop.txt', 'a')

    best_valid_f1 = 0
    best_model = None
    wt_iter = utils.load_weights(datap[1])

    # number of predicted labels
    if 'amazon' in datap[0] or 'yelp' in datap[0] or 'dianping' in datap[0]:
        config['rnn']['pred_num'] = 3
    else:
Example #57
0
def main(conf: str, seed: int, gpu_index: int, data_path: str):
    global DEVICE
    conf = load_config(conf)
    if seed is not None:
        conf.seed = seed
    if gpu_index is not None and DEVICE == torch.device('cuda'):
        DEVICE = torch.device(f'cuda:{gpu_index}')
    if data_path is not None:
        conf['dataset']['params']['data_path'] = data_path
    logger.info(DEVICE)
    logger.info(conf)

    set_seed(conf.seed)
    from models import sub_task
    tau4vec = set_task(conf.sub_task_params, 'tau4vec', sub_task)
    logger.info('set_task: tau4vec')
    set_seed(conf.seed)
    higgsId = set_task(conf.sub_task_params, 'higgsId', sub_task)
    logger.info('set_task: higgsId')
    from models import MyDataset
    from models import MyMetrics
    set_seed(conf.seed)
    dataset = set_module([MyDataset], conf, 'dataset')
    set_seed(conf.seed)
    dataloader = DataLoader(dataset, batch_size=100, shuffle=True)
    logger.info('set dataloader')
    # #########################################################################
    # pre-train ###############################################################
    # #########################################################################
    logger.info('----- pretrain[0] start -----')
    pretrain_conf = conf.sub_task_params.tau4vec.pretrain
    for i, sub_model in enumerate(tau4vec):
        logger.info(f'pretrain: [0][{i}]')
        set_seed(conf.seed)
        optimizer = set_module([optim],
                               pretrain_conf,
                               'optimizer',
                               params=sub_model.parameters())
        loss_func = set_module([nn, MyLoss], pretrain_conf, 'loss_func')
        metrics = set_module([MyMetrics], pretrain_conf, 'metrics')
        activation = set_module([nn], pretrain_conf, 'activation')
        input_key = pretrain_conf.data.input_key
        target_key = pretrain_conf.data.target_key
        patience = pretrain_conf.patience
        tau4vec[i] = sub_task.pre_train(epochs=pretrain_conf.epochs,
                                        model=sub_model,
                                        dataloader=dataloader,
                                        optimizer=optimizer,
                                        loss_func=loss_func,
                                        input_key=input_key,
                                        target_key=target_key,
                                        device=DEVICE,
                                        patience=patience,
                                        metrics=metrics,
                                        activation=activation)
    logger.info('----- pretrain[0] end -----')
    logger.info('----- pretrain[1] start -----')
    pretrain_conf = conf.sub_task_params.higgsId.pretrain
    for i, sub_model in enumerate(higgsId):
        logger.info(f'pretrain: [1][{i}]')
        set_seed(conf.seed)
        optimizer = set_module([optim],
                               pretrain_conf,
                               'optimizer',
                               params=sub_model.parameters())
        loss_func = set_module([nn], pretrain_conf, 'loss_func')
        metrics = set_module([MyMetrics], pretrain_conf, 'metrics')
        activation = set_module([nn], pretrain_conf, 'activation')
        input_key = pretrain_conf.data.input_key
        target_key = pretrain_conf.data.target_key
        patience = pretrain_conf.patience
        higgsId[i] = sub_task.pre_train(epochs=pretrain_conf.epochs,
                                        model=sub_model,
                                        dataloader=dataloader,
                                        optimizer=optimizer,
                                        loss_func=loss_func,
                                        input_key=input_key,
                                        target_key=target_key,
                                        device=DEVICE,
                                        patience=patience,
                                        metrics=metrics,
                                        activation=activation)
    logger.info('----- pretrain[1] end -----')

    # #########################################################################
    # #########################################################################
    logger.info('copy the pretrain models')
    pre_trained_tau4vec = set_task(conf.sub_task_params, 'tau4vec', sub_task)
    pre_trained_higgsId = set_task(conf.sub_task_params, 'higgsId', sub_task)
    pre_trained_model = [pre_trained_tau4vec, pre_trained_higgsId]
    task = [tau4vec, higgsId]
    for num_task, sub in enumerate(task):
        for num_model in range(len(sub)):
            pre_trained_model[num_task][num_model].load_state_dict(
                deepcopy(task[num_task][num_model].state_dict()))
    # #########################################################################
    # #########################################################################

    logger.info('----- SPOS-NAS start -----')
    sposnas_conf = conf.SPOS_NAS

    def make_output_dict():
        return {
            'X': [],
            'AUC': {
                f'{f}_{s}': []
                for f, s in product(FIRST_MODEL_NAME, SECOND_MODEL_NAME)
            },
            'LOSS_1ST': {
                f'{f}_{s}': []
                for f, s in product(FIRST_MODEL_NAME, SECOND_MODEL_NAME)
            },
            'LOSS_2ND': {
                f'{f}_{s}': []
                for f, s in product(FIRST_MODEL_NAME, SECOND_MODEL_NAME)
            },
            'RATIO': {
                f'{f}_{s}': []
                for f, s in product(FIRST_MODEL_NAME, SECOND_MODEL_NAME)
            },
            'ONLY_PT_RATIO': {
                f'{f}_{s}': []
                for f, s in product(FIRST_MODEL_NAME, SECOND_MODEL_NAME)
            },
        }

    # evaluate only pre-train model
    loss_func = [
        set_module([nn, MyLoss], sposnas_conf, 'loss_first'),
        set_module([nn, MyLoss], sposnas_conf, 'loss_second')
    ]
    loss_weight = [0.5, 0.5]
    metrics = get_module([MyMetrics], 'Calc_Auc')()
    from models.SPOS_NAS import SPOS
    model = SPOS(task=task, loss_func=loss_func, loss_weight=loss_weight)
    model.to(DEVICE)
    logger.info('evaluate only pre-train model')
    dummy = make_output_dict()
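    # evaluate the pretrained sub-models for every (tau4vec, higgsId) choice pair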
    for now_choice in product(range(3), range(3)):
        pre_train_result = evaluate(model, conf, dataloader, metrics, dummy,
                                    now_choice)

    output_dict = make_output_dict()
    X_list = [0.0, 0.1, 0.5]
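    # sweep the loss-mixing ratio X, where the total loss is loss_1*X + loss_2*(1-X)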
    for X in (np.array(X_list)).round(10):
        output_dict['X'].append(X)
        logger.info(f'loss_ratio: {X:.6f} (loss_1*X + loss_2*(1-X)) start')
        set_seed(conf.seed)

        def initialize_pretrain_weight():
            logger.info('load pretrain models...')
            for num_task, sub in enumerate(task):
                for num_model in range(len(sub)):
                    task[num_task][num_model].load_state_dict(
                        deepcopy(pre_trained_model[num_task]
                                 [num_model].state_dict()))
            logger.info('load pretrain models done')

        logger.info('set model parameters...')
        loss_func = [
            set_module([nn, MyLoss], sposnas_conf, 'loss_first'),
            set_module([nn, MyLoss], sposnas_conf, 'loss_second')
        ]
        loss_weight = [X, 1. - X]
        metrics = get_module([MyMetrics], 'Calc_Auc')()

        for now_choice in product(range(3), range(3)):
            initialize_pretrain_weight()
            model = SPOS(task=task,
                         loss_func=loss_func,
                         loss_weight=loss_weight)
            model.to(DEVICE)
            optimizer = set_module([optim],
                                   sposnas_conf,
                                   'optimizer',
                                   params=model.parameters())
            scheduler = set_module([optim.lr_scheduler],
                                   sposnas_conf,
                                   'scheduler',
                                   optimizer=optimizer)
            logger.info('set model parameters done')
            logger.info('fit model...')
            model.fit(epochs=sposnas_conf.epochs,
                      dataloader=dataloader,
                      device=DEVICE,
                      optimizer=optimizer,
                      scheduler=scheduler,
                      patience=sposnas_conf.patience,
                      choice=now_choice)
            logger.info('fit model done')
            logger.info('eval model...')
            output_dict = evaluate(model, conf, dataloader, metrics,
                                   output_dict, now_choice)
            logger.info('eval model done')

    logger.info(f'seed: {conf.seed}/ pretrain result: {pre_train_result}')
    logger.info(f'seed: {conf.seed}/ final result: {output_dict}')

    logger.info('all train and eval step are done')

    logger.info('plot results...')
    logger.info('plot auc...')
    import matplotlib.pyplot as plt
    plt.style.use('seaborn-darkgrid')
    import pandas as pd
    df = pd.DataFrame(output_dict['AUC'], index=output_dict['X'])
    df = df.rename(
        columns={
            f'{f}_{s}': f'{f}:{s}'
            for f, s in product(FIRST_MODEL_NAME, SECOND_MODEL_NAME)
        })
    df.plot()
    plt.xlabel('X')
    plt.ylabel('AUC')
    plt.savefig(f'grid_auc_{conf.seed}.png')
    plt.close()

    logger.info('plot loss_2ND...')
    import matplotlib.pyplot as plt
    plt.style.use('seaborn-darkgrid')
    df = pd.DataFrame(output_dict['LOSS_2ND'], index=output_dict['X'])
    df = df.rename(
        columns={
            f'{f}_{s}': f'{f}:{s}'
            for f, s in product(FIRST_MODEL_NAME, SECOND_MODEL_NAME)
        })
    df.plot()
    plt.xlabel('X')
    plt.ylabel('LOSS_2ND')
    plt.savefig(f'grid_loss_2nd_{conf.seed}.png')
    plt.close()

    logger.info('plot loss_1ST...')
    import matplotlib.pyplot as plt
    plt.style.use('seaborn-darkgrid')
    df = pd.DataFrame(output_dict['LOSS_1ST'], index=output_dict['X'])
    df = df.rename(
        columns={
            f'{f}_{s}': f'{f}:{s}'
            for f, s in product(FIRST_MODEL_NAME, SECOND_MODEL_NAME)
        })
    df.plot()
    plt.xlabel('X')
    plt.ylabel('LOSS_1ST')
    plt.savefig(f'grid_loss_1st_{conf.seed}.png')
    plt.close()

    logger.info('plot ratios...')
    import matplotlib.pyplot as plt
    plt.style.use('seaborn-darkgrid')
    df = pd.DataFrame(output_dict['ONLY_PT_RATIO'], index=output_dict['X'])
    df = df.rename(
        columns={
            f'{f}_{s}': f'{f}:{s}'
            for f, s in product(FIRST_MODEL_NAME, SECOND_MODEL_NAME)
        })
    df.plot()
    plt.ylabel('ratio')
    plt.savefig(f'grid_only_pt_ratio_{conf.seed}.png')
    plt.close()
    import matplotlib.pyplot as plt
    plt.style.use('seaborn-darkgrid')
    df = pd.DataFrame(output_dict['RATIO'], index=output_dict['X'])
    df = df.rename(
        columns={
            f'{f}_{s}': f'{f}:{s}'
            for f, s in product(FIRST_MODEL_NAME, SECOND_MODEL_NAME)
        })
    df.plot()
    plt.ylabel('ratio')
    plt.savefig(f'grid_ratio_{conf.seed}.png')
    plt.close()
    logger.info('plot results done')
Example #58
0
File: engine.py  Project: pxsocs/warden
def realtime_price(ticker, fx=None, source=None, parsed=True):
    '''
    Gets realtime price from first provider available and returns
    result = {
            'symbol': ,
            'name': ,
            'price': ,
            'fx': ,
            'time': ,
            'timezone':
            'source':
        }
    '''
    if fx is None:
        config = load_config()
        fx = config['PORTFOLIO']['base_fx']

    if fx == 'USD':
        fxrate = 1
    else:
        from pricing_engine.alphavantage import realtime as aa_realtime
        fxrate = aa_realtime(fx)
        fxrate = parseNumber(fxrate['price'])

    ticker = ticker.replace(' ', '')
    if source and type(source) != list:
        raise TypeError("source has to be a list of strings - can be one string inside a list")

    try:
        source_list = mapping[ticker]
    except KeyError:
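        # ticker has no explicit source mapping; fall back to the default provider order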
        source_list = [
            'cryptocompare',
            'alphavantage_currency',
            'alphavantage_global',
            'twelvedata',
            'fmp'
        ]

    from pricing_engine.alphavantage import realtime as aa_realtime
    from pricing_engine.cryptocompare import realtime as cc_realtime
    from pricing_engine.fmp import realtime as fmp_realtime
    from pricing_engine.twelvedata import realtime as td_realtime

    results = None
    # Gets from each source
    for src in source_list:
        if src == 'alphavantage_currency':
            results = aa_realtime(ticker, 'USD', 'CURRENCY_EXCHANGE_RATE', parsed=parsed)
        if src == 'alphavantage_global':
            results = aa_realtime(ticker, 'USD', 'GLOBAL_QUOTE', parsed=parsed)
        if src == 'cryptocompare':
            results = cc_realtime(ticker, 'USD', parsed=parsed)
        if src == 'fmp':
            results = fmp_realtime(ticker, parsed=parsed)
        if src == 'twelvedata':
            results = td_realtime(ticker, parsed=parsed)
        # Check if data is valid
        if results is not None:
            if parsed and 'price' in results:
                if results['price'] is not None:
                    if isinstance(results['time'], str):
                        results['time'] = parser.parse(results['time'])
                    results['price'] = parseNumber(results['price'])
                    results['price'] = (
                        results['price'] / fxrate)
                    return (results)
    return (results)
Example #59
0
File: main.py  Project: DCdream/DA-CRF
def train():
    # load data sets
    # train_sentences = load_sentences(FLAGS.train_file, FLAGS.lower, FLAGS.zeros)
    # dev_sentences = load_sentences(FLAGS.dev_file, FLAGS.lower, FLAGS.zeros)
    all_train_sentences = load_sentences(FLAGS.train_file, FLAGS.lower,
                                         FLAGS.zeros)
    train_sentences, dev_sentences = split_train_dev(all_train_sentences)
    test_sentences = load_sentences(FLAGS.test_file, FLAGS.lower, FLAGS.zeros)

    # Use selected tagging scheme (IOB / IOBES)
    update_tag_scheme(train_sentences, FLAGS.tag_schema)
    update_tag_scheme(test_sentences, FLAGS.tag_schema)

    # update_tag_scheme(dev_sentences, FLAGS.tag_schema)

    # create maps if not exist
    if not os.path.isfile(FLAGS.map_file):
        # create dictionary for word
        if FLAGS.pre_emb:
            # dico_chars_train = char_mapping(train_sentences, FLAGS.lower)[0]
            dico_chars_train = char_mapping(all_train_sentences,
                                            FLAGS.lower)[0]
            dico_chars, char_to_id, id_to_char = augment_with_pretrained(
                dico_chars_train.copy(), FLAGS.emb_file,
                list(
                    itertools.chain.from_iterable([[w[0] for w in s]
                                                   for s in test_sentences])))
        else:
            _c, char_to_id, id_to_char = char_mapping(all_train_sentences,
                                                      FLAGS.lower)
        # _c, char_to_id, id_to_char = char_mapping(train_sentences, FLAGS.lower)

        # Create a dictionary and a mapping for tags
        _t, tag_to_id, id_to_tag = tag_mapping(all_train_sentences)
        # _t, tag_to_id, id_to_tag = tag_mapping(train_sentences)
        with open(FLAGS.map_file, "wb") as f:
            pickle.dump([char_to_id, id_to_char, tag_to_id, id_to_tag], f)
    else:
        with open(FLAGS.map_file, "rb") as f:
            char_to_id, id_to_char, tag_to_id, id_to_tag = pickle.load(f)

    # nlp = StanfordCoreNLP(r'E:\DC\dataset\泰一指尚评测数据\stanford-corenlp-full-2017-06-09')
    # l_sorted_lexcion = load_lexcion(FLAGS.lexcion_file, nlp)
    l_sorted_lexcion = []
    # prepare data, get a collection of list containing index
    train_data = prepare_dataset(train_sentences, char_to_id, tag_to_id,
                                 l_sorted_lexcion, FLAGS.lower)
    dev_data = prepare_dataset(dev_sentences, char_to_id, tag_to_id,
                               l_sorted_lexcion, FLAGS.lower)
    test_data = prepare_dataset(test_sentences, char_to_id, tag_to_id,
                                l_sorted_lexcion, FLAGS.lower)
    print("%i / %i / %i sentences in train / dev / test." %
          (len(train_data), len(dev_data), len(test_data)))

    max_len = max(
        [len(sentence[0]) for sentence in train_data + test_data + dev_data])
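    # max_len is used below to pad every batch to the longest sentence across splits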

    train_manager = BatchManager(train_data, FLAGS.batch_size, max_len)
    dev_manager = BatchManager(dev_data, 800, max_len)
    test_manager = BatchManager(test_data, 800, max_len)

    # random.shuffle(train_data)

    # pad_test_data = pad_data(test_data)
    # pad_dev_data = pad_data(dev_data)

    # make path for store log and model if not exist
    make_path(FLAGS)
    if os.path.isfile(FLAGS.config_file):
        config = load_config(FLAGS.config_file)
    else:
        config = config_model(char_to_id, tag_to_id, max_len)
        save_config(config, FLAGS.config_file)
    make_path(FLAGS)

    log_path = os.path.join("log", FLAGS.log_file)
    logger = get_logger(log_path)
    print_config(config, logger)

    # limit GPU memory
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True
    steps_per_epoch = train_manager.len_data
    with tf.Session(config=tf_config) as sess:
        model = create_model(sess, Model, FLAGS.ckpt_path, load_word2vec,
                             config, id_to_char, logger)
        logger.info("start training")
        loss = []
        for i in range(FLAGS.max_epoch):
            random.shuffle(train_data)
            pad_train_data = pad_data(train_data, max_len)
            strings, chars, lexcion_teatures, pos_ids, dep_ids, head_ids, targets = pad_train_data
            for j in range(0, len(strings), FLAGS.batch_size):
                batch = [
                    strings[j:j + FLAGS.batch_size],
                    chars[j:j + FLAGS.batch_size],
                    lexcion_teatures[j:j + FLAGS.batch_size],
                    pos_ids[j:j + FLAGS.batch_size],
                    dep_ids[j:j + FLAGS.batch_size],
                    head_ids[j:j + FLAGS.batch_size],
                    targets[j:j + FLAGS.batch_size]
                ]
                step, batch_loss = model.run_step(sess, True, batch)
                loss.append(batch_loss)
                if step % FLAGS.steps_check == 0:
                    iteration = step // steps_per_epoch + 1
                    logger.info("iteration:{} step:{}/{}, "
                                "AS loss:{:>9.6f}".format(
                                    iteration, step % steps_per_epoch,
                                    steps_per_epoch, np.mean(loss)))
                    loss = []

            best = evaluate(sess, model, "dev", dev_manager, id_to_tag, logger)
            if best:
                save_model(sess, model, FLAGS.ckpt_path, logger, i)
                evaluate(sess, model, "test", test_manager, id_to_tag, logger)
        evaluate(sess, model, "test", test_manager, id_to_tag, logger)
Example #60
0
def run_to_task(task_to):
    import general_utils
    from general_utils import RuntimeDeterminedEnviromentVars
    import models.architectures as architectures
    from data.load_ops import resize_rescale_image
    import utils
    from data.task_data_loading import load_and_specify_preprocessors_for_representation_extraction
    import lib.data.load_ops as load_ops
    from importlib import reload
    import tensorflow as tf
    tf.logging.set_verbosity(tf.logging.ERROR)

    # for arch in ['regular', 'shallow', 'dilated_shallow', 'dilated_regular']:
    arch = args.arch
    data_amount = args.data
    if not args.no_regenerate_data:
        #if False:
        all_outputs = {}
        pickle_dir = 'viz_{task_to}_transfer_{hs}_{arch}.pkl'.format(
            arch=arch, hs=args.hs, task_to=task_to)
        subprocess.call(
            "aws s3 cp s3://task-preprocessing-512-oregon/visualizations/transitive_viz/viz_{}.pkl {}"
            .format(task_to, pickle_dir),
            shell=True)
        import os
        if os.path.isfile(pickle_dir):
            with open(pickle_dir, 'rb') as fp:
                all_outputs = pickle.load(fp)

        global list_of_src_tasks
        for i, task_from in enumerate(list_of_src_tasks):
            task_key = task_from.split('/')[-1]
            if task_key in all_outputs:
                print("{} already exists....\n\n\n".format(task_key))
                continue
            print("Doing from {task_from} to {task_to}".format(
                task_from=task_key, task_to=task_to))
            general_utils = importlib.reload(general_utils)
            tf.reset_default_graph()
            training_runners = {
                'sess': tf.InteractiveSession(),
                'coord': tf.train.Coordinator()
            }

            CONFIG_DIR = task_from
            print(CONFIG_DIR)

            ############## Load Configs ##############
            cfg = utils.load_config(CONFIG_DIR, nopause=True)
            RuntimeDeterminedEnviromentVars.register_dict(cfg)
            if args.second_order and not to_flip_dict[task]:
                cfg['val_representations_file'] = cfg[
                    'val_representations_file'][::-1]
            cfg['num_epochs'] = 1
            cfg['randomize'] = False
            root_dir = cfg['root_dir']
            cfg['num_read_threads'] = 1
            cfg['model_path'] = tf.train.latest_checkpoint(
                os.path.join(cfg['log_root'], 'logs', 'slim-train'
                             #'time'
                             ))
            # print(cfg['model_path'])
            if cfg['model_path'] is None and task == 'random':
                cfg['model_path'] = tf.train.latest_checkpoint(
                    os.path.join(cfg['log_root'], 'logs', 'slim-train',
                                 'time'))
            if cfg['model_path'] is None:
                continue

            ############## Set Up Inputs ##############
            # tf.logging.set_verbosity( tf.logging.INFO )
            inputs = utils.setup_input_transfer(
                cfg, is_training=ON_TEST_SET, use_filename_queue=False
            )  # is_training determines whether to use train/validation
            RuntimeDeterminedEnviromentVars.load_dynamic_variables(inputs, cfg)
            RuntimeDeterminedEnviromentVars.populate_registered_variables()
            start_time = time.time()
            # utils.print_start_info( cfg, inputs[ 'max_steps' ], is_training=False )

            ############## Set Up Model ##############
            model = utils.setup_model(inputs, cfg, is_training=True)
            m = model['model']
            model['saver_op'].restore(training_runners['sess'],
                                      cfg['model_path'])

            ############## Start dataloading workers ##############
            data_prefetch_init_fn = utils.get_data_prefetch_threads_init_fn_transfer(
                inputs, cfg, is_training=ON_TEST_SET, use_filename_queue=False)

            prefetch_threads = threading.Thread(
                target=data_prefetch_init_fn,
                args=(training_runners['sess'], training_runners['coord']))
            prefetch_threads.start()

            ############## Run First Batch ##############
            (
                input_batch,
                representation_batch,
                target_batch,
                data_idx,
                encoder_output,
                predicted,
                loss,
            ) = training_runners['sess'].run([
                m.input_images, m.input_representations, m.decoder.targets,
                model['data_idxs'], m.encoder_output, m.decoder.decoder_output,
                m.total_loss
            ])
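            # For 2D/2.5D segmentation outputs, project the 64-dim per-pixel embeddings
            # onto 3 PCA components so they can be rendered as an RGB image.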
            if task_to == 'segment2d' or task_to == 'segment25d':
                from sklearn.decomposition import PCA
                x = np.zeros((32, 256, 256, 3), dtype='float')
                for i in range(predicted.shape[0]):
                    embedding_flattened = np.squeeze(predicted[i]).reshape(
                        (-1, 64))
                    pca = PCA(n_components=3)
                    pca.fit(embedding_flattened)
                    lower_dim = pca.transform(embedding_flattened).reshape(
                        (256, 256, -1))
                    lower_dim = (lower_dim - lower_dim.min()) / (
                        lower_dim.max() - lower_dim.min())
                    x[i] = lower_dim
                predicted = x
            if task_to == 'segmentsemantic_rb':
                predicted = np.argmax(predicted, axis=-1)
            ############## Clean Up ##############
            training_runners['coord'].request_stop()
            training_runners['coord'].join()

            # if os.path.isfile(pickle_dir):
            #     with open(pickle_dir, 'rb') as fp:
            #         all_outputs = pickle.load(fp)

            ############## Store to dict ##############
            to_store = {
                'data_idx': data_idx,
                'output': predicted,
                'loss': loss
            }
            all_outputs[task_key] = to_store

            # os.system("sudo cp {d} /home/ubuntu/s3/model_log".format(d=pickle_dir))

            ############## Reset graph and paths ##############
            tf.reset_default_graph()
            training_runners['sess'].close()
            #print(sys.modules.keys())
            #del sys.modules[ 'config' ]
            sys.path = remove_dups(sys.path)
            print('Current Directory: ', os.getcwd())
            pickle_dir = 'viz_{task_to}_transfer_{hs}_{arch}.pkl'.format(
                arch=arch, hs=args.hs, task_to=task_to)
            with open(pickle_dir, 'wb') as fp:
                pickle.dump(all_outputs, fp)
            subprocess.call(
                "aws s3 cp {} s3://task-preprocessing-512-oregon/visualizations/transitive_viz/viz_{}.pkl"
                .format(pickle_dir, task_to),
                shell=True)

    # Run jupyter nb
    print('Running Jupyter Notebooks...')
    #os.makedirs("/home/ubuntu/task-taxonomy-331b/notebooks/transfer_viz/transfer_{hs}_{arch}".format(hs=args.hs, arch=arch), exist_ok=True)
    notebook_path = '/home/ubuntu/task-taxonomy-331b/notebooks/transfer_viz/Visual_{task_to}'.format(
        task_to=task_to)
    subprocess.call("jupyter nbconvert \
            --execute {notebook_path}.ipynb \
            --to html \
            --ExecutePreprocessor.kernel_name=python3 \
            --ExecutePreprocessor.timeout=1200 ".format(
        notebook_path=notebook_path, arch=arch, hs=args.hs, task_to=task_to),
                    shell=True)
    subprocess.call(
        "aws s3 cp {}.html s3://task-preprocessing-512-oregon/visualizations/{}/"
        .format(notebook_path, TRANSFER_TYPE),
        shell=True)