def do_ping(service):
    # Get service data (url and param_name)
    url, param_name = services[service]
    # Prepare input data: the sitemap URL to submit
    sitemap_url = model.get_config().url + 'sitemap.xml'
    form_fields = {param_name: sitemap_url}
    form_data = urllib.urlencode(form_fields)
    # Invoke the url fetch
    result = urlfetch.fetch(url, payload=form_data, follow_redirects=True)
    # Return status code
    return result.status_code
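# Context sketch: do_ping() relies on Python 2's urllib.urlencode and App
# Engine's urlfetch, plus a module-level `services` dict mapping a service
# name to (ping URL, form parameter name). The entries below are
# illustrative placeholders, not the module's actual values.
import urllib

from google.appengine.api import urlfetch

services = {
    'google': ('http://www.google.com/webmasters/tools/ping', 'sitemap'),
    'bing': ('http://www.bing.com/ping', 'sitemap'),
}

# Usage: http_status = do_ping('google')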
def twit(key):
    config = model.get_config()
    post = model.get_post_by_key(key)
    if not post.as_draft:
        # Authenticate against the Twitter API with the stored credentials
        auth = tweepy.OAuthHandler(config.consumer_key, config.consumer_secret)
        auth.set_access_token(config.access_key, config.access_secret)
        api = tweepy.API(auth)
        # Tweet the post title followed by its permalink
        post_url = config.url + post.slug
        msg = post.title + " " + post_url
        api.update_status(msg)
        return msg
    else:
        return "draft!"
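# Usage sketch: twit() expects the blog config to carry Twitter OAuth
# credentials (consumer_key, consumer_secret, access_key, access_secret)
# and the site base URL. The post key below is a placeholder.
status = twit('some_post_key')  # returns the tweeted message, or "draft!"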
def main(args=sys.argv[1:]):
    parser = argparse.ArgumentParser(
        description='Studio WebUI server. Usage: studio <arguments>')
    parser.add_argument('--config', help='configuration file', default=None)
    # parser.add_argument('--guest',
    #                     help='Guest mode (does not require db credentials)',
    #                     action='store_true')
    parser.add_argument('--port',
                        help='port to run Flask server on',
                        type=int,
                        default=5000)
    parser.add_argument('--host',
                        help='host name.',
                        default='0.0.0.0')
    parser.add_argument('--verbose', '-v',
                        help='Verbosity level. Allowed values: '
                             'debug, info, warn, error, crit '
                             'or numerical value of logger levels.',
                        default=None)

    args = parser.parse_args(args)
    config = model.get_config()
    if args.config:
        with open(args.config) as f:
            config = yaml.load(f)

    if args.verbose:
        config['verbose'] = args.verbose

    # if args.guest:
    #     config['database']['guest'] = True

    global _config
    global _db_provider
    _config = config
    _db_provider = model.get_db_provider(_config, blocking_auth=False)

    getlogger().setLevel(model.parse_verbosity(config.get('verbose')))

    global _save_auth_cookie
    _save_auth_cookie = True

    print('Starting Studio UI on port {0}'.format(args.port))
    app.run(host=args.host, port=args.port)
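# Usage sketch: launch the web UI on a non-default port with an explicit
# config file (the file name here is a placeholder):
main(['--config', 'studio_config.yaml', '--port', '8080', '-v', 'debug'])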
def __init__(self, args):
    self.config = model.get_config()
    if args.config:
        if isinstance(args.config, basestring):
            # Config given as a file path: load the file and merge it in
            with open(args.config) as f:
                self.config.update(yaml.load(f))
        else:
            # Config given as an already-parsed dict
            self.config.update(args.config)

    if args.guest:
        self.config['database']['guest'] = True

    self.db = model.get_db_provider(self.config)
    self.logger = logging.getLogger('LocalExecutor')
    self.logger.setLevel(model.parse_verbosity(self.config.get('verbose')))
    self.logger.debug("Config: ")
    self.logger.debug(self.config)
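# Construction sketch: this initializer reads only `.config` and `.guest`
# from its args, so a bare namespace suffices. The class name LocalExecutor
# is inferred from the logger name above; the config path is a placeholder.
import argparse

executor = LocalExecutor(
    argparse.Namespace(config='studio_config.yaml', guest=False))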
def main(args=sys.argv[1:]):
    logger = logs.getLogger('studio-runner')
    parser = argparse.ArgumentParser(
        description='Studio runner. Usage: studio run <runner_arguments> '
                    'script <script_arguments>')
    parser.add_argument('--config', help='configuration file', default=None)
    parser.add_argument('--project', help='name of the project', default=None)
    parser.add_argument(
        '--experiment', '-e',
        help='Name of the experiment. If none provided, '
             'random uuid will be generated',
        default=None)
    parser.add_argument(
        '--guest',
        help='Guest mode (does not require db credentials)',
        action='store_true')
    parser.add_argument(
        '--force-git',
        help='If run in a git directory, force running the experiment '
             'even if changes are not committed',
        action='store_true')
    parser.add_argument(
        '--gpus',
        help='Number of gpus needed to run the experiment',
        type=int,
        default=None)
    parser.add_argument(
        '--cpus',
        help='Number of cpus needed to run the experiment '
             '(used to configure cloud instance)',
        type=int,
        default=None)
    parser.add_argument(
        '--ram',
        help='Amount of RAM needed to run the experiment '
             '(used to configure cloud instance), ex: 10G, 10GB',
        default=None)
    parser.add_argument(
        '--gpuMem',
        help='Amount of GPU RAM needed to run the experiment',
        default=None)
    parser.add_argument(
        '--hdd',
        help='Amount of hard drive space needed to run the experiment '
             '(used to configure cloud instance), ex: 10G, 10GB',
        default=None)
    parser.add_argument(
        '--queue', '-q',
        help='Name of the remote execution queue',
        default=None)
    parser.add_argument(
        '--cloud',
        help='Cloud execution mode. Could be gcloud, gcspot, ec2 or ec2spot',
        default=None)
    parser.add_argument(
        '--bid',
        help='Spot instance price bid, specified in USD or in percentage '
             'of on-demand instance price. Default is %(default)s',
        default='100%')
    parser.add_argument(
        '--capture-once', '-co',
        help='Name of the immutable artifact to be captured. '
             'It will be captured once before the experiment is run',
        default=[], action='append')
    parser.add_argument(
        '--capture', '-c',
        help='Name of the mutable artifact to be captured continuously',
        default=[], action='append')
    parser.add_argument(
        '--reuse', '-r',
        help='Name of the artifact from another experiment to use',
        default=[], action='append')
    parser.add_argument(
        '--verbose', '-v',
        help='Verbosity level. Allowed values: '
             'debug, info, warn, error, crit '
             'or numerical value of logger levels.',
        default=None)
    parser.add_argument(
        '--metric',
        help='Metric to show in the summary of the experiment, '
             'and to base hyperparameter search on. '
             'Refers to a scalar value in the tensorboard log, '
             'example: --metric=val_loss[:final | :min | :max] to report '
             'validation loss at the end of the keras experiment '
             '(or smallest or largest throughout the experiment for :min '
             'and :max respectively)',
        default=None)
    parser.add_argument(
        '--hyperparam', '-hp',
        help='Try out multiple values of a certain parameter. '
             'For example, --hyperparam=learning_rate:0.01:0.1:l10 '
             'will instantiate 10 versions of the script, replace '
             'learning_rate with one of the 10 values for learning '
             'rate that lie on a log grid from 0.01 to 0.1, create '
             'experiments and place them in the queue.',
        default=[], action='append')
    parser.add_argument(
        '--num-workers',
        help='Number of local or cloud workers to spin up',
        type=int,
        default=None)
    parser.add_argument(
        '--python-pkg',
        help='Python package not present in the current environment '
             'that is needed for the experiment. Only compatible with '
             'remote and cloud workers for now',
        default=[], action='append')
    parser.add_argument(
        '--ssh-keypair',
        help='Name of the SSH keypair used to access the EC2 '
             'instances directly',
        default=None)
    parser.add_argument(
        '--optimizer', '-opt',
        help='Name of the optimizer to use; the default is grid search. '
             'The name of the optimizer must either be in the '
             'studio/optimizer_plugins directory, or the path to the '
             'optimizer source file must be supplied.',
        default='grid')
    parser.add_argument(
        '--cloud-timeout',
        help="Time (in seconds) that cloud workers wait for messages. "
             "If negative, wait for the first message in the queue "
             "indefinitely and shut down as soon as no new messages are "
             "available. If zero, don't wait at all. "
             "Default value is %(default)d",
        type=int,
        default=300)
    parser.add_argument(
        '--user-startup-script',
        help='Path of script to run immediately '
             'before running the remote worker',
        default=None)
    parser.add_argument(
        '--branch',
        help='Branch of studioml to use when running remote worker, useful '
             'for debugging pull requests. Default is current',
        default=None)
    parser.add_argument(
        '--max-duration',
        help='Max experiment runtime (i.e. time after which the experiment '
             'should be killed no matter what). Examples of values '
             'might include 5h, 48h2m10s',
        default=None)
    parser.add_argument(
        '--lifetime',
        help='Max experiment lifetime (i.e. wait time after which the '
             'experiment loses relevance and should not be started). '
             'Examples include 240h30m10s',
        default=None)
    parser.add_argument(
        '--container',
        help='Singularity container in which the experiment should be run. '
             'Assumes that the container has all dependencies installed',
        default=None)
    parser.add_argument(
        '--port',
        help='Ports to open on a cloud instance',
        default=[], action='append')

    # Detect which argument is the script filename
    # and attribute all arguments past that index as related to the script
    (runner_args, other_args) = parser.parse_known_args(args)
    py_suffix_args = [i for i, arg in enumerate(args)
                      if arg.endswith('.py') or '::' in arg]

    rerun = False
    if len(py_suffix_args) < 1:
        print('None of the arguments end with .py')
        if len(other_args) == 0:
            print("Trying to run a container job")
            assert runner_args.container is not None
            exec_filename = None
        elif len(other_args) == 1:
            print("Treating last argument as experiment key to rerun")
            rerun = True
            experiment_key = args[-1]
        else:
            print("Too many extra arguments - should be either none "
                  "for container job or one for experiment re-run")
            sys.exit(1)
    else:
        script_index = py_suffix_args[0]
        exec_filename, other_args = args[script_index], args[script_index + 1:]
        runner_args = parser.parse_args(args[:script_index])

    # TODO: Queue the job based on arguments and only then execute.

    config = model.get_config(runner_args.config)

    if runner_args.verbose:
        config['verbose'] = runner_args.verbose

    if runner_args.guest:
        config['database']['guest'] = True

    if runner_args.container:
        runner_args.capture_once.append(
            runner_args.container + ':_singularity')

    verbose = model.parse_verbosity(config['verbose'])
    logger.setLevel(verbose)

    if git_util.is_git() and not git_util.is_clean() and not rerun:
        logger.warn('Running from dirty git repo')
        if not runner_args.force_git:
            logger.error(
                'Specify --force-git to run experiment from dirty git repo')
            sys.exit(1)

    resources_needed = parse_hardware(runner_args, config['resources_needed'])
    logger.debug('resources requested: ')
    logger.debug(str(resources_needed))

    artifacts = {}
    artifacts.update(parse_artifacts(runner_args.capture, mutable=True))
    artifacts.update(parse_artifacts(runner_args.capture_once, mutable=False))
    with model.get_db_provider(config) as db:
        artifacts.update(parse_external_artifacts(runner_args.reuse, db))

    if runner_args.branch:
        config['cloud']['branch'] = runner_args.branch

    if runner_args.user_startup_script:
        config['cloud']['user_startup_script'] = \
            runner_args.user_startup_script

    if runner_args.lifetime:
        config['experimentLifetime'] = runner_args.lifetime

    if any(runner_args.hyperparam):
        if runner_args.optimizer == "grid":
            experiments = add_hyperparam_experiments(
                exec_filename,
                other_args,
                runner_args,
                artifacts,
                resources_needed,
                logger)

            queue_name = submit_experiments(
                experiments,
                config=config,
                logger=logger,
                queue_name=runner_args.queue,
                cloud=runner_args.cloud)

            spin_up_workers(
                runner_args,
                config,
                resources_needed,
                queue_name=queue_name,
                verbose=verbose)
        else:
            opt_modulepath = os.path.join(
                os.path.dirname(os.path.abspath(__file__)),
                "optimizer_plugins",
                runner_args.optimizer + ".py")
            if not os.path.exists(opt_modulepath):
                opt_modulepath = os.path.abspath(
                    os.path.expanduser(runner_args.optimizer))
            logger.info('optimizer path: %s' % opt_modulepath)

            assert os.path.exists(opt_modulepath)
            sys.path.append(os.path.dirname(opt_modulepath))
            opt_module = importlib.import_module(
                os.path.basename(opt_modulepath.replace(".py", '')))

            h = HyperparameterParser(runner_args, logger)
            hyperparams = h.parse()
            optimizer = getattr(
                opt_module,
                "Optimizer")(hyperparams, config['optimizer'], logger)

            workers_started = False
            queue_name = runner_args.queue
            while not optimizer.stop():
                hyperparam_pop = optimizer.ask()
                hyperparam_tuples = h.convert_to_tuples(hyperparam_pop)

                experiments = add_hyperparam_experiments(
                    exec_filename,
                    other_args,
                    runner_args,
                    artifacts,
                    resources_needed,
                    logger,
                    optimizer=optimizer,
                    hyperparam_tuples=hyperparam_tuples)

                queue_name = submit_experiments(
                    experiments,
                    config=config,
                    logger=logger,
                    cloud=runner_args.cloud,
                    queue_name=queue_name)

                if not workers_started:
                    spin_up_workers(
                        runner_args,
                        config,
                        resources_needed,
                        queue_name=queue_name,
                        verbose=verbose)
                    workers_started = True

                fitnesses, behaviors = get_experiment_fitnesses(
                    experiments, optimizer, config, logger)

                # for i, hh in enumerate(hyperparam_pop):
                #     print fitnesses[i]
                #     for hhh in hh:
                #         print hhh
                try:
                    optimizer.tell(hyperparam_pop, fitnesses, behaviors)
                except BaseException:
                    optimizer.tell(hyperparam_pop, fitnesses)

                try:
                    optimizer.disp()
                except BaseException:
                    logger.warn('Optimizer has no disp() method')
    else:
        if rerun:
            with model.get_db_provider(config) as db:
                experiment = db.get_experiment(experiment_key)
                new_key = runner_args.experiment if runner_args.experiment \
                    else experiment_key + '_rerun' + str(uuid.uuid4())
                experiment.key = new_key
                for _, art in six.iteritems(experiment.artifacts):
                    art['mutable'] = False

            experiments = [experiment]
        else:
            experiments = [create_experiment(
                filename=exec_filename,
                args=other_args,
                experiment_name=runner_args.experiment,
                project=runner_args.project,
                artifacts=artifacts,
                resources_needed=resources_needed,
                metric=runner_args.metric,
                max_duration=runner_args.max_duration)]

        queue_name = submit_experiments(
            experiments,
            config=config,
            logger=logger,
            cloud=runner_args.cloud,
            queue_name=runner_args.queue)

        spin_up_workers(
            runner_args,
            config,
            resources_needed,
            queue_name=queue_name,
            verbose=verbose)

    return
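# Invocation sketch: everything before the first argument ending in '.py'
# (or containing '::') is parsed as runner arguments; the script and the
# arguments after it are passed through to the experiment. The queue name,
# script name, and values below are placeholders.
main(['--queue', 'my_queue',
      '--hyperparam=learning_rate:0.01:0.1:l10',
      'train.py', '--epochs=10'])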
def main(args=sys.argv):
    logger = logging.getLogger('studio-runner')
    parser = argparse.ArgumentParser(
        description='Studio runner. Usage: studio run <runner_arguments> '
                    'script <script_arguments>')
    parser.add_argument('--config', help='configuration file', default=None)
    parser.add_argument('--project', help='name of the project', default=None)
    parser.add_argument(
        '--experiment', '-e',
        help='Name of the experiment. If none provided, '
             'random uuid will be generated',
        default=None)
    parser.add_argument(
        '--guest',
        help='Guest mode (does not require db credentials)',
        action='store_true')
    parser.add_argument(
        '--force-git',
        help='If run in a git directory, force running the experiment '
             'even if changes are not committed',
        action='store_true')
    parser.add_argument(
        '--gpus',
        help='Number of gpus needed to run the experiment',
        default=None)
    parser.add_argument(
        '--cpus',
        help='Number of cpus needed to run the experiment '
             '(used to configure cloud instance)',
        default=None)
    parser.add_argument(
        '--ram',
        help='Amount of RAM needed to run the experiment '
             '(used to configure cloud instance)',
        default=None)
    parser.add_argument(
        '--hdd',
        help='Amount of hard drive space needed to run the experiment '
             '(used to configure cloud instance)',
        default=None)
    parser.add_argument(
        '--queue', '-q',
        help='Name of the remote execution queue',
        default=None)
    parser.add_argument(
        '--cloud',
        help='Cloud execution mode. Could be gcloud, ec2 or ec2spot',
        default=None)
    parser.add_argument(
        '--bid',
        help='Spot instance price bid, specified in USD or in percentage '
             'of on-demand instance price. Default is %(default)s',
        default='100%')
    parser.add_argument(
        '--capture-once', '-co',
        help='Name of the immutable artifact to be captured. '
             'It will be captured once before the experiment is run',
        default=[], action='append')
    parser.add_argument(
        '--capture', '-c',
        help='Name of the mutable artifact to be captured continuously',
        default=[], action='append')
    parser.add_argument(
        '--reuse', '-r',
        help='Name of the artifact from another experiment to use',
        default=[], action='append')
    parser.add_argument(
        '--verbose', '-v',
        help='Verbosity level. Allowed values: '
             'debug, info, warn, error, crit '
             'or numerical value of logger levels.',
        default=None)
    parser.add_argument(
        '--metric', '-m',
        help='Metric to show in the summary of the experiment, '
             'and to base hyperparameter search on. '
             'Refers to a scalar value in the tensorboard log, '
             'example: --metric=val_loss[:final | :min | :max] to report '
             'validation loss at the end of the keras experiment '
             '(or smallest or largest throughout the experiment for :min '
             'and :max respectively)',
        default=None)
    parser.add_argument(
        '--hyperparam', '-hp',
        help='Try out multiple values of a certain parameter. '
             'For example, --hyperparam=learning_rate:0.01:0.1:l10 '
             'will instantiate 10 versions of the script, replace '
             'learning_rate with one of the 10 values for learning '
             'rate that lie on a log grid from 0.01 to 0.1, create '
             'experiments and place them in the queue.',
        default=[], action='append')
    parser.add_argument(
        '--num-workers',
        help='Number of local or cloud workers to spin up',
        default=None)
    parser.add_argument(
        '--python-pkg',
        help='Python package not present in the current environment '
             'that is needed for the experiment. Only compatible with '
             'remote and cloud workers for now',
        default=[], action='append')
    parser.add_argument(
        '--ssh-keypair',
        help='Name of the SSH keypair used to access the EC2 '
             'instances directly',
        default=None)
    parser.add_argument(
        '--optimizer', '-opt',
        help='Name of the optimizer to use; the default is grid search. '
             'The name of the optimizer must either be in the '
             'studio/optimizer_plugins directory, or the path to the '
             'optimizer source file must be supplied.',
        default='grid')
    parser.add_argument(
        '--cloud-timeout',
        help="Time (in seconds) that cloud workers wait for messages. "
             "If negative, wait for the first message in the queue "
             "indefinitely and shut down as soon as no new messages are "
             "available. If zero, don't wait at all. "
             "Default value is %(default)d",
        type=int,
        default=300)

    # Detect which argument is the script filename
    # and attribute all arguments past that index as related to the script
    py_suffix_args = [i for i, arg in enumerate(args) if arg.endswith('.py')]
    if len(py_suffix_args) < 1:
        print('At least one argument should be a python script '
              '(end with *.py)')
        parser.print_help()
        exit()

    script_index = py_suffix_args[0]
    runner_args = parser.parse_args(args[1:script_index])
    exec_filename, other_args = args[script_index], args[script_index + 1:]

    # TODO: Queue the job based on arguments and only then execute.

    config = model.get_config(runner_args.config)

    if runner_args.verbose:
        config['verbose'] = runner_args.verbose

    verbose = model.parse_verbosity(config['verbose'])
    logger.setLevel(verbose)

    db = model.get_db_provider(config)

    if git_util.is_git() and not git_util.is_clean():
        logger.warn('Running from dirty git repo')
        if not runner_args.force_git:
            logger.error(
                'Specify --force-git to run experiment from dirty git repo')
            sys.exit(1)

    resources_needed = parse_hardware(runner_args, config['cloud'])
    logger.debug('resources requested: ')
    logger.debug(str(resources_needed))

    artifacts = {}
    artifacts.update(parse_artifacts(runner_args.capture, mutable=True))
    artifacts.update(parse_artifacts(runner_args.capture_once, mutable=False))
    artifacts.update(parse_external_artifacts(runner_args.reuse, db))

    if any(runner_args.hyperparam):
        if runner_args.optimizer == "grid":
            experiments = add_hyperparam_experiments(
                exec_filename,
                other_args,
                runner_args,
                artifacts,
                resources_needed)
            submit_experiments(
                experiments,
                config,
                runner_args,
                logger,
                resources_needed)
        else:
            opt_modulepath = os.path.join(
                os.path.dirname(os.path.abspath(__file__)),
                "optimizer_plugins",
                runner_args.optimizer + ".py")
            # logger.info('optimizer path: %s' % opt_modulepath)
            if not os.path.exists(opt_modulepath):
                opt_modulepath = os.path.abspath(
                    os.path.expanduser(runner_args.optimizer))
            logger.info('optimizer path: %s' % opt_modulepath)

            assert os.path.exists(opt_modulepath)
            sys.path.append(os.path.dirname(opt_modulepath))
            opt_module = importlib.import_module(
                os.path.basename(opt_modulepath.replace(".py", '')))

            hyperparam_values, log_scale_dict = get_hyperparam_values(
                runner_args)
            optimizer = getattr(opt_module, "Optimizer")(hyperparam_values,
                                                         log_scale_dict)

            while not optimizer.stop():
                hyperparam_tuples = optimizer.ask()

                experiments = add_hyperparam_experiments(
                    exec_filename,
                    other_args,
                    runner_args,
                    artifacts,
                    resources_needed,
                    optimizer=optimizer,
                    hyperparam_tuples=hyperparam_tuples)
                submit_experiments(
                    experiments,
                    config,
                    runner_args,
                    logger,
                    resources_needed)

                fitnesses = get_experiment_fitnesses(experiments,
                                                     optimizer, config, logger)

                optimizer.tell(hyperparam_tuples, fitnesses)
                # if config['verbose'] == "info" or config['verbose'] ==
                # "debug":
                try:
                    optimizer.disp()
                except BaseException:
                    logger.warn('Optimizer has no disp() method')
    else:
        experiments = [model.create_experiment(
            filename=exec_filename,
            args=other_args,
            experiment_name=runner_args.experiment,
            project=runner_args.project,
            artifacts=artifacts,
            resources_needed=resources_needed,
            metric=runner_args.metric)]
        submit_experiments(
            experiments,
            config,
            runner_args,
            logger,
            resources_needed)

    db = None
    return
def render(template_name, **kwargs):
    user = users.get_current_user()
    config = model.get_config()
    return render_template(template_name,
                           config=config,
                           user=user,
                           **kwargs)
import traceback
import six

import google.oauth2.id_token
import google.auth.transport.requests

# Implied by usage below: `app = Flask(__name__)` and `model.get_config()`
from flask import Flask
import model

from experiment import experiment_from_dict
import logs

app = Flask(__name__)

DB_PROVIDER_EXPIRATION = 1800

_db_provider_timestamp = None
_db_provider = None
_config = model.get_config()
_tensorboard_dirs = {}
_grequest = google.auth.transport.requests.Request()
_save_auth_cookie = False

logger = None


@app.route('/')
def dashboard():
    return _render('dashboard.html')


@app.route('/projects')
def projects():