Example #1
def _init():
    session = requests.Session()
    adapter = requests.adapters.HTTPAdapter(
        pool_connections=config.get_default('connection_pool'), pool_maxsize=config.get_default('connection_pool'),
        max_retries=config.get_default('connection_retries'))
    session.mount('http://', adapter)
    global _session
    _session = session
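The pool size and retry count are read from the same config module whose setter appears in Example #13. A short usage sketch, assuming set_default accepts the keys that get_default reads here (as Example #13 suggests):

import config  # the project's config module used throughout these examples

# Enlarge the connection pool before the shared session is first created;
# _init() reads 'connection_pool' and 'connection_retries' via get_default().
config.set_default(connection_pool=20)
_init()
print(_session.adapters['http://'])  # the HTTPAdapter mounted for plain HTTP

Note that _init mounts the custom adapter only for the 'http://' prefix, so HTTPS requests keep the session's default adapter.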
Example #2
def irc_RPL_WELCOME(bot, prefix, params):
	"""
	Begin logging when the bot officially joins the network.
	"""
	global logger
	if logger is None:
		logger = BufferLogger(config.get_default("log", "dir", "greenbot"))
Example #3
def run_on_variable(distribution, variable_name, values):
    measures = []
    results = {}
    for value in values:
        kwargs = config.get_default()
        kwargs[variable_name] = value
        temp = run_exp(distribution, **kwargs)
        for method in temp:
            if method not in results:
                results[method] = {}
            results[method][str(value)] = temp[method].report()
            if len(measures) == 0:
                measures = [x for x in results[method][str(value)]]

    ofile = open(distribution + '_' + variable_name + '.csv', 'w')
    for measure in measures:
        ofile.write(measure + '\n')
        ofile.write('method')
        for value in values:
            ofile.write('\t' + str(value))
        ofile.write('\n')
        for method in config.output_order:
            if method not in results:
                continue
            ofile.write(method)
            for value in values:
                ofile.write('\t' + str(results[method][str(value)][measure]))
            ofile.write('\n')
    ofile.close()
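The CSV output above is assembled by hand with tab characters. As a design alternative (a sketch, not the project's code), the standard csv module with csv.writer(f, delimiter='\t') handles separators and quoting:

import csv

def write_measure_block(writer, measure, values, results, output_order):
    # One block per measure: the measure name, a header row of swept values,
    # then one row per method in the configured output order.
    writer.writerow([measure])
    writer.writerow(['method'] + [str(v) for v in values])
    for method in output_order:
        if method in results:
            writer.writerow(
                [method] + [results[method][str(v)][measure] for v in values])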
Example #4
def _get(url, params, auth):
    try:
        r = requests.get(
            url, params=params, auth=RequestsAuth(auth) if auth is not None else None,
            timeout=config.get_default('connection_timeout'), headers=_headers)
    except Exception as e:
        return None, ResponseInfo(None, e)
    return __return_wrapper(r)
Example #5
def _post(url, data, files, auth):
    if _session is None:
        _init()
    try:
        r = _session.post(
            url, data=data, files=files, auth=auth, headers=_headers, timeout=config.get_default('connection_timeout'))

    except Exception as e:
        return None, ResponseInfo(None, e)
    return __return_wrapper(r)
Example #6
	def buildProtocol(self, addr):
		bot = GreenBot()

		# set session properties that should be set /before/ we connect
		# the value is chosen from several according to this precedence order:
		# 	defaults < configuration file entries < command line arguments
		bot.factory = self

		bot.nickname = config.get_default("bot", "nickname", "greenbot") \
					   if (self.properties['nickname'] is None) \
					   else self.properties['nickname']
		bot.username = config.get_default("bot", "username", "greenbot") \
					   if (self.properties['username'] is None) \
					   else self.properties['username']
		bot.password = config.get_default("server", "password", "greenbot") \
					   if (self.properties['password'] is None) \
					   else self.properties['password']

		bot.load_modules()

		# reset delay timer for reconnecting clients
		self.resetDelay()

		return bot
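The comment above spells out the precedence order: defaults < configuration file entries < command line arguments. A self-contained sketch of that resolution order, using hypothetical dictionaries in place of the config file and the parsed command line:

def resolve(key, defaults, file_config, cli_args):
    """Return the value for key: CLI beats the config file, which beats defaults."""
    if cli_args.get(key) is not None:
        return cli_args[key]
    if file_config.get(key) is not None:
        return file_config[key]
    return defaults[key]

# The file entry wins here because no command line value was given.
defaults = {'nickname': 'greenbot'}
file_config = {'nickname': 'filebot'}
cli_args = {'nickname': None}
assert resolve('nickname', defaults, file_config, cli_args) == 'filebot'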
Example #7
	def __init__(self, prefix):
		self.prefix = prefix
		self.buffers = {}

		# if the log directory doesn't exist, create it
		if not os.path.isdir(prefix): os.mkdir(prefix)

		cycle_task = LoopingCall(self.cycle_all)

		duration = 0
		try:
			duration = config.get_default("log", "cycle-duration", 3600*24)
		except ValueError:
			duration = 3600*24

		cycle_task.start(duration)
Example #8
def generate_data_on_variable(distribution, variable_name, values):
    for value in values:
        kwargs = config.get_default()
        kwargs[variable_name] = value
        generate_data(variable_name, distribution, **kwargs)
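Here, as in Example #3, config.get_default() is called with no arguments and used as the full keyword dictionary for one experiment, with a single entry overridden per swept value. A sketch of that no-argument form, assuming it simply returns a copy of a module-level defaults dict (the keys below are placeholders):

_EXPERIMENT_DEFAULTS = {'n_samples': 1000, 'dimensions': 10, 'noise': 0.1}

def get_default():
    # Return a copy so callers can override entries without touching the defaults.
    return dict(_EXPERIMENT_DEFAULTS)

kwargs = get_default()
kwargs['dimensions'] = 50            # the swept variable
# generate_data(variable_name, distribution, **kwargs) as in the example above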
Example #9
def main(args):
    config = cfg.get_default()
    cfg.set_params(config, args.config_path, args.set)
    cfg.freeze(config, True)
    print('- Configuration:')
    print(config)

    if config.dataset == 'FluidIceShake':
        n_groups = 2
        n_particles = 348
    elif config.dataset == 'RigidFall':
        n_groups = 3
        n_particles = 192
    elif config.dataset == 'MassRope':
        n_groups = 2
        n_particles = 95
    else:
        raise ValueError('Unsupported environment')

    # generate outputs for both train and valid data
    train_loader = get_dataloader(config, 'train', shuffle=False)
    valid_loader = get_dataloader(config, 'valid', shuffle=False)

    # build model
    model = PointSetNet(
        config.n_frames,
        config.pred_hidden,
        n_particles,
        n_groups,
        config.batchnorm,
        single_out=False,
        recur_pred=config.recur_pred).to(_DEVICE)

    # a model checkpoint must be loaded
    if config.load_path != '':
        print('- Loading model from {}'.format(config.load_path))

        # load model on GPU/CPU
        if torch.cuda.is_available():
            model.load_state_dict(torch.load(config.load_path))
        else:
            model.load_state_dict(
                torch.load(config.load_path, map_location='cpu'))

    else:
        raise ValueError('- Please provide a valid checkpoint')

    if config.log_eval:
        # [[train_data_loss], [valid_data_loss]]
        losses = []

    for loader, name in [(train_loader, 'train'), (valid_loader, 'valid')]:
        # load data with progress bar
        pbar = tqdm(loader)
        n_traj = 0

        # create directory to save output data
        save_dir = os.path.join(config.run_dir, 'eval', name)
        if not os.path.isdir(save_dir):
            os.makedirs(save_dir)

        if config.vis_eval:
            vis_save_dir = os.path.join(save_dir, 'out_vis')
            if not os.path.isdir(vis_save_dir):
                os.makedirs(vis_save_dir)

        if config.log_eval:
            # [(loss, pos_loss, grp_loss) for all data in current loader]
            loader_loss = []

        for images, positions, groups in pbar:
            if config.log_eval:
                model, _, loss, pos_loss, grp_loss = step(
                    config, model, None, images, positions, groups, False)
                loader_loss.append((loss, pos_loss, grp_loss))
                pbar.set_description('Loss {:f}'.format(loss))

            pbar.set_description('Generating video outputs')
            n_traj = generate_outputs(config, model, n_traj, images, save_dir)

            if config.vis:
                visualize(config, model, n_traj // config.batch_size,
                          n_particles, images, positions, groups, False)

        if config.log_eval:
            losses.append(loader_loss)

    if config.log_eval:
        # save all losses into JSON file
        stats = {}
        train_losses, valid_losses = losses
        (stats['train_losses'],
         stats['train_pos_losses'],
         stats['train_grp_losses']) = list(zip(*train_losses))
        (stats['valid_losses'],
         stats['valid_pos_losses'],
         stats['valid_grp_losses']) = list(zip(*valid_losses))

        with open(os.path.join(config.run_dir,
                               'eval_stats.json'), 'w') as fout:
            json.dump(stats, fout)
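This script (and Example #12) obtains its configuration through cfg.get_default(), cfg.set_params() and cfg.freeze(). A sketch of what such helpers might look like on top of a yacs-style CfgNode; the attribute names below are placeholders rather than the project's real schema:

from yacs.config import CfgNode as CN

_C = CN()
_C.dataset = 'RigidFall'   # placeholder defaults; the real schema is larger
_C.n_frames = 10
_C.load_path = ''

def get_default():
    return _C.clone()

def set_params(config, config_path, override_list):
    # Merge a YAML file first, then "key value" pairs from the command line.
    if config_path:
        config.merge_from_file(config_path)
    if override_list:
        config.merge_from_list(override_list)

def freeze(config, flag=True):
    # The scripts pass a second argument; its meaning is not shown in the snippets.
    config.freeze()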
Example #10
class ArgParser(object):
    """
    This class is responsible for parsing the command line arguments (if any)
    given to gramps, and determining if a GUI or a CLI session must be started.
    The valid arguments are:

    Possible: 
        1/ FAMTREE : Just the family tree (name or database dir)
        2/ -O, --open=FAMTREE, Open a family tree
        3/ -i, --import=FILE, Import a family tree of any format understood
                 by an importer, optionally provide -f to indicate format
        4/ -e, --export=FILE, Export a family tree in the required format,
                 optionally provide -f to indicate format
        5/ -f, --format=FORMAT : format after a -i or -e option
        6/ -a, --action: An action (possible: 'report', 'tool')
        7/ -p, --options=OPTIONS_STRING : specify options
        8/ -u, --force-unlock: A locked database can be unlocked by giving
                 this argument when opening it
    
    If the filename (no flags) is specified, the interactive session is
    launched using data from the filename.
    In this mode (filename, no flags), the rest of the arguments are ignored.
    This mode is suitable by default for GUI launchers, mime type handlers,
    and the like.
    
    If no filename or -i option is given, a new interactive session (empty
    database) is launched, since no data is given anyway.
    
    If -O or -i option is given, but no -e or -a options are given, an
    interactive session is launched with the FILE (specified with -i). 
    
    If both input (-O or -i) and processing (-e or -a) options are given,
    an interactive session will not be launched.
    """
    def __init__(self, args):
        """
        Pass the command line arguments on creation.
        """
        self.args = args

        self.open_gui = None
        self.open = None
        self.exports = []
        self.actions = []
        self.imports = []
        self.imp_db_path = None
        self.list = False
        self.list_more = False
        self.list_table = False
        self.help = False
        self.usage = False
        self.force_unlock = False
        self.create = None
        self.runqml = False

        self.errors = []
        self.parse_args()

    #-------------------------------------------------------------------------
    # Argument parser: sorts out given arguments
    #-------------------------------------------------------------------------
    def parse_args(self):
        """
        Fill in lists with open, exports, imports, and actions options.

        Any errors are added to self.errors
        
        Possible: 
        1/ Just the family tree (name or database dir)
        2/ -O, --open:   Open a family tree
        3/ -i, --import: Import a family tree of any format understood by
                 an importer, optionally provide -f to indicate format
        4/ -e, --export: Export a family tree in the required format, optionally
                 provide -f to indicate format
        5/ -f, --format=FORMAT : format after a -i or -e option
        6/ -a, --action: An action (possible: 'report', 'tool')
        7/ -p, --options=OPTIONS_STRING : specify options
        8/ -u, --force-unlock: A locked database can be unlocked by giving
                 this argument when opening it
        9/ -s, --show : Show config settings
        10/ -c, --config=config.setting:value : Set config.setting and start
                 Gramps; without :value, the current config.setting is shown
                            
        """
        try:
            # Convert arguments to unicode; otherwise getopt will not work
            # if a non-latin character is used as an option (by mistake),
            # because getopt would treat the first byte of a UTF-8 sequence
            # as an option. Example: -Ärik is '-\xc3\x84rik' and getopt
            # responds: option -\xc3 not recognized
            for arg in range(len(self.args) - 1):
                self.args[arg + 1] = Utils.get_unicode_path_from_env_var(
                    self.args[arg + 1])
            options, leftargs = getopt.getopt(self.args[1:], const.SHORTOPTS,
                                              const.LONGOPTS)
        except getopt.GetoptError, msg:
            # Extract the arguments in the list.
            # The % operator replaces the list elements with repr() of the
            # list elements, which is OK for latin characters but not for
            # non-latin characters in list elements.
            cliargs = "[ "
            for arg in range(len(self.args) - 1):
                cliargs += self.args[arg + 1] + " "
            cliargs += "]"
            # Must first do str() of the msg object.
            msg = unicode(str(msg))
            self.errors += [
                (_('Error parsing the arguments'), msg + '\n' +
                 _("Error parsing the arguments: %s \n"
                   "Type gramps --help for an overview of commands, or "
                   "read the manual pages.") % cliargs)
            ]
            return

        if leftargs:
            # if there was an argument without an option,
            # use it as a file to open and return
            self.open_gui = leftargs[0]
            print >> sys.stderr, "Trying to open: %s ..." % leftargs[0]
            #see if force open is on
            for opt_ix in range(len(options)):
                option, value = options[opt_ix]
                if option in ('-u', '--force-unlock'):
                    self.force_unlock = True
                    break
            return

        # Go over all given option and place them into appropriate lists
        cleandbg = []
        need_to_quit = False
        for opt_ix in range(len(options)):
            option, value = options[opt_ix]
            if option in ('-O', '--open'):
                self.open = value
            elif option in ('-C', '--create'):
                self.create = value
            elif option in ('-i', '--import'):
                family_tree_format = None
                if opt_ix < len(options) - 1 \
                   and options[opt_ix + 1][0] in ( '-f', '--format'):
                    family_tree_format = options[opt_ix + 1][1]
                self.imports.append((value, family_tree_format))
            elif option in ('-e', '--export'):
                family_tree_format = None
                if opt_ix < len(options) - 1 \
                   and options[opt_ix + 1][0] in ( '-f', '--format'):
                    family_tree_format = options[opt_ix + 1][1]
                self.exports.append((value, family_tree_format))
            elif option in ('-a', '--action'):
                action = value
                if action not in ('report', 'tool'):
                    print >> sys.stderr, "Unknown action: %s. Ignoring." % action
                    continue
                options_str = ""
                if opt_ix < len(options)-1 \
                            and options[opt_ix+1][0] in ( '-p', '--options' ):
                    options_str = options[opt_ix + 1][1]
                self.actions.append((action, options_str))
            elif option in ('-d', '--debug'):
                print >> sys.stderr, 'setup debugging', value
                logger = logging.getLogger(value)
                logger.setLevel(logging.DEBUG)
                cleandbg += [opt_ix]
            elif option == '-l':
                self.list = True
            elif option == '-L':
                self.list_more = True
            elif option == '-t':
                self.list_table = True
            elif option in ('-s', '--show'):
                print "Gramps config settings from %s:" % \
                       config.config.filename.encode(sys.getfilesystemencoding())
                for section in config.config.data:
                    for setting in config.config.data[section]:
                        print "%s.%s=%s" % (
                            section, setting,
                            repr(config.config.data[section][setting]))
                    print
                sys.exit(0)
            elif option in ('-c', '--config'):
                setting_name = value
                set_value = False
                if setting_name:
                    if ":" in setting_name:
                        setting_name, new_value = setting_name.split(":", 1)
                        set_value = True
                    if config.has_default(setting_name):
                        setting_value = config.get(setting_name)
                        print >> sys.stderr, "Current Gramps config setting: " \
                                   "%s:%s" % (setting_name, repr(setting_value))
                        if set_value:
                            # does a user want the default config value?
                            if new_value in ("DEFAULT", _("DEFAULT")):
                                new_value = config.get_default(setting_name)
                            else:
                                converter = Utils.get_type_converter(
                                    setting_value)
                                new_value = converter(new_value)
                            config.set(setting_name, new_value)
                            # translators: indent "New" to match "Current"
                            print >> sys.stderr, "    New Gramps config " \
                                            "setting: %s:%s" % (
                                                setting_name,
                                                repr(config.get(setting_name))
                                                )
                        else:
                            need_to_quit = True
                    else:
                        print >> sys.stderr, "Gramps: no such config setting:" \
                                             " '%s'" % setting_name
                        need_to_quit = True
                cleandbg += [opt_ix]
            elif option in ('-h', '-?', '--help'):
                self.help = True
            elif option in ('-u', '--force-unlock'):
                self.force_unlock = True
            elif option == '--usage':
                self.usage = True
            elif option == '--qml':
                self.runqml = True

        #clean options list
        cleandbg.reverse()
        for ind in cleandbg:
            del options[ind]

        if len(options) > 0 and self.open is None and self.imports == [] \
                and not (self.list or self.list_more or self.list_table or
                         self.help or self.runqml):
            # Extract and convert to unicode the arguments in the list.
            # The % operator replaces the list elements with repr() of
            # the list elements, which is OK for latin characters
            # but not for non-latin characters in list elements
            cliargs = "[ "
            for arg in range(len(self.args) - 1):
                cliargs += Utils.get_unicode_path_from_env_var(
                    self.args[arg + 1]) + " "
            cliargs += "]"
            self.errors += [(_('Error parsing the arguments'),
                             _("Error parsing the arguments: %s \n"
                               "To use in the command-line mode, supply at "
                               "least one input file to process.") % cliargs)]
        if need_to_quit:
            sys.exit(0)
Example #11
        if opt in ("-h", "--help"):
            print_help()
            sys.exit()
        elif opt == "-x":
            print("\nDownload flag has been set.")
            DOWN_FLAG = False
        elif opt == "-f":
            FOLD_FLAG = True
            customFolder = arg
        elif opt == "--hang":
            HANG_FLAG = True
        elif opt in ("-s", "--search-method"):
            searchMethod = arg
            print(searchMethod)

    folderloc = config.get_default("Folder")
    url = config.get_default("URL")
    if searchMethod == "":
        searchMethod = config.get_default("Search_Method")["value"]

    # Set download and read location to custom folder if -f <folder> option is used
    if FOLD_FLAG:
        fileloc = folderloc["value"] + customFolder + "\\"
    else:
        date = re.sub(
            "/", "_",
            time.strftime("%x"))  # Get current date with format: 'MM_DD_YY'
        fileloc = folderloc["value"] + date + "\\"

    if not os.path.exists(fileloc):  # If file location does not exist, create it
        os.makedirs(fileloc)
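Each setting read in this snippet is a dictionary with a 'value' key (folderloc["value"], the "Search_Method" entry, and so on). A sketch of the shape the code assumes; the paths and values below are made up:

_DEFAULTS = {
    "Folder": {"value": "C:\\Downloads\\"},
    "URL": {"value": "https://example.com/feed"},
    "Search_Method": {"value": "title"},
}

def get_default(name):
    # Only the "value" key is used by the code above.
    return _DEFAULTS[name]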
Example #12
def main(args):
    config = cfg.get_default()
    cfg.set_params(config, args.config_path, args.set)
    cfg.freeze(config, True)
    print('- Configuration:')
    print(config)

    if config.dataset == 'FluidIceShake':
        n_groups = 2
        n_particles = 348
    elif config.dataset == 'RigidFall':
        n_groups = 3
        n_particles = 192
    elif config.dataset == 'MassRope':
        n_groups = 2
        n_particles = 95
    else:
        raise ValueError('Unsupported environment')

    train_loader = get_dataloader(config, 'train')
    valid_loader = get_dataloader(config, 'valid')

    # build model
    model = PointSetNet(
        config.n_frames,
        config.pred_hidden,
        n_particles,
        n_groups,
        config.batchnorm,
        single_out=config.single_out,
        recur_pred=config.recur_pred,
        use_temp_encoder=config.use_temp_encoder,
        conv_temp_encoder=config.conv_temp_encoder,
        temp_embedding_size=config.temp_embedding_size).to(_DEVICE)

    print('- Model architecture:')
    print(model)

    if config.load_path != '':
        print('- Loading model from {}'.format(config.load_path))
        model.load_state_dict(torch.load(config.load_path))

    # build optimizer
    optimizer = torch.optim.Adam(model.parameters(), lr=config.lr)

    if not config.debug:
        print('- Training start')
        stats = {
            'epoch': [],
            'valid_losses': [],
            'train_losses': [],
            'train_pos_losses': [],
            'train_group_losses': [],
            'valid_pos_losses': [],
            'valid_group_losses': [],
        }
        best_valid_loss = np.Inf

        for epoch in range(config.n_epochs):

            # training
            print('- Training epoch {:d}'.format(epoch))
            epoch_train_losses = []
            epoch_train_pos_losses = []
            epoch_train_grp_losses = []

            pbar = tqdm(train_loader)
            did_vis = False
            for images, positions, groups in pbar:
                (model, optimizer, train_loss,
                 train_pos_loss, train_grp_loss) = step(
                    config, model, optimizer, images, positions, groups, True)
                epoch_train_losses.append(train_loss)
                epoch_train_pos_losses.append(train_pos_loss)
                epoch_train_grp_losses.append(train_grp_loss)
                pbar.set_description('Train loss {:f}'.format(train_loss))

                # visualize training results
                if not did_vis \
                        and config.vis and (epoch + 1) % config.vis_every == 0:
                    pbar.set_description('Generating video')
                    visualize(config, model, epoch, n_particles,
                              images, positions, groups, True)
                    did_vis = True

            train_loss = np.average(epoch_train_losses)
            train_pos_loss = np.average(epoch_train_pos_losses)
            train_grp_loss = np.average(epoch_train_grp_losses)

            print(('- Finish training epoch {:d}, training loss {:f},'
                   ' pos loss {:f}, group loss {:f}').format(
                epoch, train_loss, train_pos_loss, train_grp_loss))

            # validation
            print('- Evaluating epoch {:d}'.format(epoch))
            epoch_valid_losses = []
            epoch_valid_pos_losses = []
            epoch_valid_grp_losses = []

            pbar = tqdm(valid_loader)
            did_vis = False
            for images, positions, groups in pbar:
                with torch.no_grad():
                    (model, _, valid_loss,
                     valid_pos_loss, valid_grp_loss) = step(
                        config, model, optimizer,
                        images, positions, groups, False)
                epoch_valid_losses.append(valid_loss)
                epoch_valid_pos_losses.append(valid_pos_loss)
                epoch_valid_grp_losses.append(valid_grp_loss)
                pbar.set_description('Valid loss {:f}'.format(valid_loss))

                # visualize validation results
                if not did_vis \
                        and config.vis and (epoch + 1) % config.vis_every == 0:
                    pbar.set_description('Generating video')
                    visualize(config, model, epoch, n_particles,
                              images, positions, groups, False)
                    did_vis = True

            valid_loss = np.average(epoch_valid_losses)
            valid_pos_loss = np.average(epoch_valid_pos_losses)
            valid_grp_loss = np.average(epoch_valid_grp_losses)

            print('- Finish eval epoch {:d}, validation loss {:f}'.format(
                epoch, valid_loss))
            if valid_loss < best_valid_loss:
                print('- Best model')
                best_valid_loss = valid_loss
                torch.save(model.state_dict(),
                           os.path.join(config.run_dir, 'checkpoint_best.pth'))
            torch.save(model.state_dict(),
                       os.path.join(config.run_dir, 'checkpoint_latest.pth'))
            print()

            stats['epoch'].append(epoch)
            stats['train_losses'].append(train_loss)
            stats['valid_losses'].append(valid_loss)
            stats['train_pos_losses'].append(train_pos_loss)
            stats['train_group_losses'].append(train_grp_loss)
            stats['valid_pos_losses'].append(valid_pos_loss)
            stats['valid_group_losses'].append(valid_grp_loss)
            with open(os.path.join(config.run_dir, 'stats.json'), 'w') as fout:
                json.dump(stats, fout)

            # Plot loss curves
            plot_dir = os.path.join(config.run_dir, 'curves')
            if not os.path.isdir(plot_dir):
                os.makedirs(plot_dir)
            utils.plot_curves(
                x=stats['epoch'],
                ys=[stats['train_losses'], stats['valid_losses']],
                save_path=os.path.join(plot_dir, 'loss.png'),
                curve_labels=['train', 'valid'],
                x_label='epoch',
                y_label='total_loss',
                title='Total loss')
            utils.plot_curves(
                x=stats['epoch'],
                ys=[stats['train_pos_losses'], stats['valid_pos_losses']],
                save_path=os.path.join(plot_dir, 'loss_pos.png'),
                curve_labels=['train', 'valid'],
                x_label='epoch',
                y_label='pos_loss',
                title='Position loss')
            utils.plot_curves(
                x=stats['epoch'],
                ys=[stats['train_group_losses'], stats['valid_group_losses']],
                save_path=os.path.join(plot_dir, 'loss_grp.png'),
                curve_labels=['train', 'valid'],
                x_label='epoch',
                y_label='grp_loss',
                title='Grouping loss')

    else:  # Debug on a single batch
        images, positions, groups = next(iter(train_loader))
        images = images[:5, :15, ...]
        positions = positions[:5, :15, ...]
        groups = groups[:5, ...]
        for epoch in range(config.n_epochs):
            (model, optimizer, train_loss,
             train_pos_loss, train_grp_loss) = step(
                config, model, optimizer, images, positions, groups, True)
            print(train_loss, train_pos_loss, train_grp_loss)
Example #13
def main():
    print(config.get_default("connection_pool"))
    config.set_default(connection_pool=20)
    print(config.get_default("connection_pool"))
Example #14
	def __init__(self, properties):
		self.prefix = config.get_default("bot", "prefix", '`')
		self.properties = properties
Example #15
def main(argv):
	global connection

	############### Argument Parsing ###############
	i = 1
	while i < len(argv):
		if arg_matches(argv[i], '--port', '-p'):
			i += 1
			try: connection['port'] = int(argv[i])
			except ValueError: print "* Ignoring invalid port number:", argv[i]

		elif arg_matches(argv[i], '--ssl', '-s'):
			connection['ssl'] = True

		elif arg_matches(argv[i], '--username', '-u'):
			i += 1
			connection['username'] = argv[i]

		elif arg_matches(argv[i], '--nickname', '-n'):
			i += 1
			connection['nickname'] = argv[i]

		elif arg_matches(argv[i], '--password', '-P'):
			i += 1
			connection['password'] = argv[i]

		elif arg_matches(argv[i], '--foreground', '-f'):
			runtime['foreground'] = True

		elif arg_matches(argv[i], '--config', '-c'):
			i += 1
			runtime['config'] = argv[i]

		elif arg_matches(argv[i], '--help', '-h'):
			print_help()
			return

		elif arg_matches(argv[i], '--version'):
			version()
			return

		elif (i == len(argv) - 1):
			connection['addr'] = argv[i]

		else:
			print "Unrecognized argument:", argv[i]

		i += 1

	############### Configuration ###############

	config.load(runtime['config'])

	# connection details need to be resolved before we can do anything!

	if connection['addr'] is None:
		connection['addr'] = config.get("server", "address")

	# complain if we still don't have an address, then we die
	if connection['addr'] is None:
		print "Error: telepathy failed, unable to connect to the address you didn't provide."
		return 1

	if not connection['port']:
		try:
			connection['port'] = int(config.get_default("server", "port", '6667'))
		except ValueError:
			print "Error: parameter to --port (-p) must be an integer."
			return 1

	############### Real Work ###############
	factory = greenbot.GreenbotFactory(connection)

	if not runtime['foreground']: # this is a daemon; get forking...
		print "* Forking into background..."
		pid = os.fork()

	if runtime['foreground'] or pid == 0: # continue in the child process (or directly when running in the foreground)
		if not runtime['foreground']: os.setsid()

		# initiate the connection to the server
		greenbot.start(connection['addr'], connection['port'], factory, connection['ssl'])
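The greenbot snippets use config.load(path), a two-argument config.get(section, key) that may return None, and a three-argument config.get_default(section, key, default). A sketch of how such a module could be backed by the standard library's configparser; this is an assumption about the implementation, not the project's code:

from configparser import ConfigParser

_parser = ConfigParser()

def load(path):
    _parser.read(path)

def get(section, key):
    # Return None when the section or key is missing, as the caller expects.
    if _parser.has_option(section, key):
        return _parser.get(section, key)
    return None

def get_default(section, key, default):
    value = get(section, key)
    return default if value is None else value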
Example #16
directory = args.pop_get("--directory", default="weights")

# Distribute
vgpus = int(args.pop_get("--vgpu", default=1))
memory_limit = int(args.pop_get("--vram", default=12000))
gpus = args.pop_get("--gpus", default=None)
distribute = create_distribute(
    vgpus=vgpus, memory_limit=memory_limit, gpus=gpus)

# Pick up flags first
initialize_only = args.pop_check("--initialize")

# Default params
strategy = args.pop_get("--strategy", "repeat")
policy = args.pop_get("--policy", "rnnprop")
default = get_default(strategy=strategy, policy=policy)

# Build overrides
presets = args.pop_get("--presets", "")
overrides = []
if presets != "":
    for p in presets.split(','):
        overrides += get_preset(p)
overrides += args.to_overrides()

with distribute.scope():
    # Build strategy
    strategy = l2o.build(
        default, overrides, directory=directory, strict=True)

    # Train if not --initialize
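In this last snippet, get_default(strategy=..., policy=...) produces a nested configuration and get_preset()/args.to_overrides() produce override entries that l2o.build applies on top of it. One plausible override format is a list of (dotted-path, value) pairs applied to a nested dict; the sketch below assumes that format rather than the library's documented API:

def apply_override(cfg, dotted_key, value):
    # Set cfg['a']['b']['c'] = value for dotted_key 'a.b.c', creating nodes as needed.
    keys = dotted_key.split('.')
    node = cfg
    for key in keys[:-1]:
        node = node.setdefault(key, {})
    node[keys[-1]] = value

default = {'training': {'epochs': 25}, 'policy': {'name': 'rnnprop'}}
for key, value in [('training.epochs', 100), ('policy.name', 'rnnprop')]:
    apply_override(default, key, value)
print(default['training']['epochs'])  # 100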