def save_body(path, d, m, env):
    """Persist submitted form data *m* for the node at *path*.

    *d* describes the node ('leaf' or 'list'); *env* is unused here but
    part of the handler signature.

    Returns:
        ("\n", next_path) on success, or (None, path) when the node
        type is not handled.

    Raises:
        model.ParseException: when validation of a leaf node fails.
    """
    if d['type'] == 'leaf':
        # Keep a pristine copy: model.check may normalize m in place.
        original = copy.copy(m)
        model.check(path, m)
        errors = []
        # 'to' must not precede 'from' when both are supplied.
        if m['to'] and m['from'] and m['to'] < m['from']:
            errors.extend(('to', 'from'))
        # At least one of 'load' / 'effort' must be filled in.
        if ('load' not in original or original['load'] == '') and \
                ('effort' not in original or original['effort'] == ''):
            errors.extend(('load', 'effort'))
        if errors:
            raise model.ParseException(original, errors)
        model.create(path)
        model.save(path, m)
        return "\n", model.parent(path)
    if d['type'] == 'list' and 'name' in m:
        # Strip characters that would break URLs / path handling.
        name = m['name'].replace('?', '').replace('/', '').replace('#', '')
        new_path = path + "/" + name
        if path.strip() == '':
            return "\n", path
        model.create(new_path)
        # Hoisted: describe once instead of twice (same result).
        described = model.describe(new_path)
        if described['type'] == 'leaf' or described['type'] == 'render':
            model.save(new_path, {'name': m['name']})
        return "\n", new_path
    return None, path
def new_company(company_name, email, password):
    """Insert a new company row into the pkp_companies table.

    Returns 0 — the legacy success code callers expect.
    """
    success = 0
    table_title = 'pkp_companies'
    # Renamed from `list`, which shadowed the builtin.
    columns = ['company_name', 'email', 'password', 'password_try']
    values = [company_name, email, password, 0]  # password_try starts at 0
    create(table_title, columns, values)
    return success
def getAdministratorInfo():
    """Interactively configure the agent: optionally train a new model
    (otherwise load the saved one), then toggle socket-chat mode."""
    train_new = input(
        "Would you like to train the agent on new data/create a new agent? (y/n) "
    ) == 'y'
    if train_new:
        data.read()
        config.createModel()
        model.create(config.training, config.output)
        print("Model created")
    else:
        data.load()
        config.createModel()
        model.load()
        print("Model loaded")
    watch = input(
        "Would you like to watch a conversation with another chatbot? (y/n) ")
    # Any answer other than exactly 'y' disables socket chat.
    config.socketchat = (watch == 'y')
def save_body(path, d, m, env):
    """Save submitted fields *m* for the node at *path* described by *d*.

    Returns a (response_body, path) pair; response_body is None when the
    node type is not handled. Raises model.ParseException on invalid
    leaf data.
    """
    node_type = d['type']
    if node_type == 'leaf':
        snapshot = copy.copy(m)
        model.check(path, m)
        invalid = []
        if m['to'] and m['from'] and m['to'] < m['from']:
            invalid += ['to', 'from']
        load_blank = 'load' not in snapshot or snapshot['load'] == ''
        effort_blank = 'effort' not in snapshot or snapshot['effort'] == ''
        if load_blank and effort_blank:
            invalid += ['load', 'effort']
        if invalid:
            raise model.ParseException(snapshot, invalid)
        model.create(path)
        model.save(path, m)
        return "\n", model.parent(path)
    if node_type == 'list' and 'name' in m:
        cleaned = m['name']
        for forbidden in ('?', '/', '#'):
            cleaned = cleaned.replace(forbidden, '')
        new_path = path + "/" + cleaned
        if path.strip() == '':
            return "\n", path
        model.create(new_path)
        if (model.describe(new_path)['type'] == 'leaf'
                or model.describe(new_path)['type'] == 'render'):
            model.save(new_path, {'name': m['name']})
        return "\n", new_path
    return None, path
def create_account():
    """GET: render the signup form. POST: create the account and send
    the user to the login page."""
    if request.method == 'GET':
        return render_template('create_account.html')
    username = request.form['username']
    password = request.form['password']
    # Hard coded for assessment; the form's 'balance' field is ignored.
    starting_balance = 10000
    model.create(username, password, starting_balance)
    return redirect('/terminal/login')
def test_success(self):
    """End-to-end: ConfigureCollectionScript creates a collection, sets
    its attributes and a custom setting, and links two libraries."""
    script = ConfigureCollectionScript()
    # Three libraries; only L1 and L2 will be granted access.
    l1, ignore = create(
        self._db, Library, name="Library 1", short_name="L1",
    )
    l2, ignore = create(
        self._db, Library, name="Library 2", short_name="L2",
    )
    l3, ignore = create(
        self._db, Library, name="Library 3", short_name="L3",
    )
    self._db.commit()

    # Create a collection, set all its attributes, set a custom
    # setting, and associate it with two libraries.
    output = StringIO()
    script.do_run(
        self._db, ["--name=New Collection", "--protocol=Overdrive",
                   "--library=L2", "--library=L1",
                   "--setting=library_id=1234",
                   "--external-account-id=acctid", "--url=url",
                   "--username=username", "--password=password",
                   ], output
    )

    # The collection was created and configured properly.
    collection = get_one(self._db, Collection)
    eq_("New Collection", collection.name)
    eq_("url", collection.external_integration.url)
    eq_("acctid", collection.external_account_id)
    eq_("username", collection.external_integration.username)
    eq_("password", collection.external_integration.password)

    # Two libraries now have access to the collection.
    eq_([collection], l1.collections)
    eq_([collection], l2.collections)
    eq_([], l3.collections)

    # One CollectionSetting was set on the collection.
    [setting] = collection.external_integration.settings
    eq_("library_id", setting.key)
    eq_("1234", setting.value)

    # The output explains the collection settings.
    expect = ("Configuration settings stored.\n" + "\n".join(collection.explain()) + "\n")
    eq_(expect, output.getvalue())
def test_collect_event(self):
    """Events always reach sitewide providers; a library's providers
    only see events addressed to that library."""
    sitewide_integration, ignore = create(
        self._db, ExternalIntegration,
        goal=ExternalIntegration.ANALYTICS_GOAL,
        protocol="mock_analytics_provider")

    library, ignore = create(self._db, Library, short_name="library")
    library_integration, ignore = create(
        self._db, ExternalIntegration,
        goal=ExternalIntegration.ANALYTICS_GOAL,
        protocol="mock_analytics_provider",
    )
    library_integration.libraries += [library]

    work = self._work(title="title", with_license_pool=True)
    [lp] = work.license_pools
    analytics = Analytics(self._db)
    sitewide_provider = analytics.sitewide_providers[0]
    library_provider = analytics.library_providers[library.id][0]

    analytics.collect_event(self._default_library, lp,
                            CirculationEvent.DISTRIBUTOR_CHECKIN, None)

    # The sitewide provider was called.
    eq_(1, sitewide_provider.count)
    eq_(CirculationEvent.DISTRIBUTOR_CHECKIN, sitewide_provider.event_type)

    # The library provider wasn't called, since the event was for a different library.
    eq_(0, library_provider.count)

    analytics.collect_event(library, lp,
                            CirculationEvent.DISTRIBUTOR_CHECKIN, None)

    # Now both providers were called, since the event was for the library provider's library.
    eq_(2, sitewide_provider.count)
    eq_(1, library_provider.count)
    eq_(CirculationEvent.DISTRIBUTOR_CHECKIN, library_provider.event_type)

    # Here's an event that we couldn't associate with any
    # particular library.
    analytics.collect_event(None, lp,
                            CirculationEvent.DISTRIBUTOR_CHECKOUT, None)

    # It's counted as a sitewide event, but not as a library event.
    eq_(3, sitewide_provider.count)
    eq_(1, library_provider.count)
def run(args):
    """Load the data, train, evaluate, and export the model for serving
    and evaluating.

    Args:
        args: experiment parameters.
    """
    # Fix the RNG so runs are reproducible.
    torch.manual_seed(args.seed)

    # Data pipelines.
    train_loader, test_loader, eval_loader = inputs.load_data(args)

    # Model, loss function, and optimizer.
    net, criterion, optimizer = model.create(args)

    # Training loop with per-epoch testing.
    for epoch in range(1, args.num_epochs + 1):
        train(net, train_loader, criterion, optimizer, epoch)
        test(net, test_loader, criterion)

    # Final pass over the held-out evaluation set.
    print("Evaluate the model using the evaluation dataset")
    test(net, eval_loader, criterion)

    # Export the trained weights locally, then optionally to GCS.
    torch.save(net.state_dict(), args.model_name)
    if args.job_dir:
        inputs.save_model(args)
def run_prob(down_station, input_list, include_time, sample_size,
             network_type, nr_layers, nr_units):
    """Runner: rebuild the model for this configuration, load the
    weights with the lowest CV error, and print its config and timing."""
    start_time_run = time.time()
    result_dir = util.get_result_dir(down_station, network_type, nr_layers,
                                     nr_units, sample_size)
    # Only the train/CV splits are used; the remaining 11 elements of
    # data.construct's 15-tuple are discarded.
    (y_train, x_train, y_cv, x_cv, _, _, _, _, _, _, _, _, _, _,
     _) = data.construct(down_station, input_list, include_time,
                         sample_size, network_type)
    input_dim = 0
    input_dim_2 = 0
    # 'bnn' inputs carry an extra dimension — presumably
    # (samples, dim, dim2); other network types are 2-D. TODO confirm.
    if (network_type == 'bnn'):
        (_, input_dim, input_dim_2) = x_train.shape
    else:
        (_, input_dim) = x_train.shape
    my_model = model.create(result_dir, input_dim, nr_layers, nr_units,
                            network_type, input_dim_2)
    trained_model_file = util.model_file_name_lowest_cv(result_dir)
    my_model.load_weights(trained_model_file, by_name=True)
    print(my_model.get_config())
    elapsed_time_run = time.time() - start_time_run
    # NOTE(review): the label says "Training time" but this measures
    # load/setup time only — no training happens here.
    print(
        time.strftime("Training time : %H:%M:%S",
                      time.gmtime(elapsed_time_run)))
def test_handle_click(postgresql_with_schema):
    """Test handle_click function: unknown ids raise; each click
    returns the jisho link and increments the click counter."""
    word = 'こんにちは'
    word_id = model.create(postgresql_with_schema, 'dummy_chat_id', word)

    with pytest.raises(ValueError):
        model.handle_click(postgresql_with_schema,
                           int(word_id) + 1)  # ID does not exist.

    # First click: link returned, counter becomes 1.
    link = model.handle_click(postgresql_with_schema, int(word_id))
    assert link == model._full_link(model.JISHO_URL, word)
    cur = postgresql_with_schema.cursor()
    cur.execute(r'select word, clicks from links where id=%s;', (word_id, ))
    row = cur.fetchone()
    assert row[0] == word
    assert row[1] == 1

    # Second click increments the counter again.
    link = model.handle_click(postgresql_with_schema, int(word_id))
    assert link == model._full_link(model.JISHO_URL, word)
    cur = postgresql_with_schema.cursor()
    cur.execute(r'select word, clicks from links where id=%s;', (word_id, ))
    row = cur.fetchone()
    assert row[0] == word
    assert row[1] == 2
def test_reconfigure_library(self):
    """ConfigureLibraryScript updates an existing library in place."""
    # The library exists.
    library, ignore = create(
        self._db, Library, name="Library 1", short_name="L1",
    )
    script = ConfigureLibraryScript()
    output = StringIO()

    # We're going to change one value and add some more.
    script.do_run(
        self._db, [
            "--short-name=L1",
            "--name=Library 1 New Name",
            "--random-library-registry-shared-secret",
            "--library-registry-short-name=nyl1",
        ],
        output
    )

    eq_("Library 1 New Name", library.name)
    # The registry short name is stored upper-cased ('nyl1' -> 'NYL1').
    eq_("NYL1", library.library_registry_short_name)

    # The shared secret was randomly generated, so we can't test
    # its exact value, but we do know it's a string that can be
    # converted into a hexadecimal number.
    assert library.library_registry_shared_secret != None
    int(library.library_registry_shared_secret, 16)

    expect_output = "Configuration settings stored.\n" + "\n".join(library.explain()) + "\n"
    eq_(expect_output, output.getvalue())
def createid():
    """GET: render the signup page. POST: create a user, set up the
    account records, and redirect to the landing dashboard."""
    if request.method == 'GET':
        return render_template('createid.html')
    else:
        createn = request.form['createname']
        createpass = request.form['createpassword']
        createpass2 = request.form['createpassword2']
        newobj = model.createcustomuser(createn, createpass, createpass2)
        # NOTE(review): error signalling relies on exact strings returned
        # by model.createcustomuser — brittle; keep in sync with model.
        if newobj == "Something wrong with ya input." or newobj == "Your password inputs do not match.":
            return render_template('createid.html',
                                   message='Bad input and stuff!')
        elif "Username already exists" in newobj:
            strreturn = 'Username already exists: ' + str(createn)
            return render_template('createid.html', message=strreturn)
        else:
            # newobj is row-like: index 3 is the user id, index 1 the
            # username — TODO confirm against model's return shape.
            myid = newobj[3]
            myname = newobj[1]
            #mypass=newobj[3]
            idz1 = captureid(myid)
            # model.create yields the balance and earnings records
            # (each exposing .balancez / .earningz).
            bal, earn = model.create(idz1)
            mess = "Successful creation: " + str(myname) + "! What next?"
            return redirect(
                url_for("landing",
                        message1=mess,
                        balance=bal.balancez,
                        earnings=earn.earningz,
                        idno=idz1,
                        xbal=bal,
                        xearn=earn))
def askid():
    """GET: render the login page. POST: authenticate the id/password
    and redirect to the landing dashboard."""
    if request.method == 'GET':
        return render_template('askid.html')
    else:
        #TODO if it is Post method
        idz = request.form['idask']
        passwordz = request.form['passwordz']
        ###idz== we have the ID now.. refer to model?
        #idno=model.userauth(idz)
        idno = model.authcustomuser(idz, passwordz)
        # myid=idno[1]
        # myname=idno[2]
        # mypass=idno[3]
        # NOTE(review): errors are signalled by exact strings from
        # model.authcustomuser — brittle; keep in sync with model.
        if idno == "Bad inputz" or idno == "Bad ID & password input":
            return render_template('askid.html',
                                   message='Bad input and stuff!')
        else:
            # idno is row-like: [1]=id, [2]=name, [3]=password —
            # TODO confirm against model's return shape.
            myid = idno[1]
            myname = idno[2]
            mypass = idno[3]
            idz1 = captureid(myid)
            # Fetch balance/earnings records for the dashboard.
            bal, earn = model.create(idz1)
            mess = "Successful input user id: " + str(myname) + "! What next?"
            return redirect(
                url_for("landing",
                        message1=mess,
                        balance=bal.balancez,
                        earnings=earn.earningz,
                        idno=idz1,
                        xbal=bal,
                        xearn=earn))
def quote(balance, earnings, idno, xbal, xearn):
    """GET: render the quote page. POST: look up a stock quote and
    redirect back to the landing dashboard with the result."""
    if request.method == 'GET':
        return render_template('quote.html', balance=balance, earnings=earnings, idno=idno, xbal=xbal, xearn=xearn)
    else:
        idz1 = idno
        quoting = request.form['quotez']
        x = model.quote(quoting)
        #zeez=x.split()
        # model.quote returns a str on error, something non-str
        # (presumably numeric) on success — TODO confirm.
        if isinstance(x, str):
            message2 = "An error occured..." + x
            return render_template('quote.html', balance=balance, earnings=earnings, idno=idno, xbal=xbal, xearn=xearn, message2=message2)
        else:
            # Refresh the balance/earnings records for the dashboard.
            bal, earn = model.create(idz1)
            xstr = str(x)
            mess = "the quote is " + xstr + " for {}.".format(quoting)
            return redirect(
                url_for("landing", message1=mess, balance=bal.balancez, earnings=earn.earningz, idno=idno, xbal=bal, xearn=earn))
def test_collect_event(self):
    """A checkin event collected by LocalAnalyticsProvider is written
    to the CirculationEvent table with the right pool, type and time."""
    integration, ignore = create(self._db, ExternalIntegration, goal=ExternalIntegration.ANALYTICS_GOAL, protocol="core.local_analytics_provider")
    la = LocalAnalyticsProvider(integration)

    work = self._work(title="title", authors="author", fiction=True, audience="audience", language="lang", with_license_pool=True)
    [lp] = work.license_pools
    now = datetime.datetime.utcnow()
    la.collect_event(self._default_library, lp, CirculationEvent.DISTRIBUTOR_CHECKIN, now, old_value=None, new_value=None)

    # Exactly one matching event must have been recorded.
    [event] = self._db \
        .query(CirculationEvent) \
        .filter(CirculationEvent.type == CirculationEvent.DISTRIBUTOR_CHECKIN) \
        .all()
    eq_(lp, event.license_pool)
    eq_(CirculationEvent.DISTRIBUTOR_CHECKIN, event.type)
    eq_(now, event.start)
def __init__(self, path):
    """Open the database at *path* and set up registries plus the
    Model base class bound to this store."""
    self.db = database.open(path)
    self.backreferences = registry.BackreferenceRegistry()
    self.instances = registry.ModelInstanceRegistry()
    self.models = {}     # presumably name -> model class; populated elsewhere — TODO confirm
    self.context = None  # current context; set elsewhere — TODO confirm
    # Must come last: model.create(self) may read the attributes above.
    self.Model = model.create(self)
def _lane(self, display_name=None, library=None, parent=None, genres=None,
          languages=None, fiction=None
          ):
    """Create (or fetch) a Lane for tests, optionally attaching genres,
    languages and a fiction flag. Python 2 code (uses basestring)."""
    display_name = display_name or self._str
    library = library or self._default_library
    lane, is_new = create(
        self._db, Lane,
        library=library,
        parent=parent, display_name=display_name,
        fiction=fiction
    )
    # A newly created sublane goes to the end of the parent's
    # priority order.
    if is_new and parent:
        lane.priority = len(parent.sublanes) - 1
    if genres:
        # Accept a single genre or a list of them.
        if not isinstance(genres, list):
            genres = [genres]
        for genre in genres:
            # Genre names are looked up; Genre objects are used as-is.
            if isinstance(genre, basestring):
                genre, ignore = Genre.lookup(self._db, genre)
            lane.genres.append(genre)
    if languages:
        # Accept a single language or a list of them.
        if not isinstance(languages, list):
            languages = [languages]
        lane.languages = languages
    return lane
def predict(self):
    """Load the input data, build a model from it, and run prediction."""
    # Aliased import: the original rebound the module name `input`
    # (which also shadows the builtin) to the loaded data.
    import input as input_module
    import model
    dataset = input_module.load()
    predictor = model.create(dataset)
    predictor.predict(dataset)
def test_bad_arguments(self):
    """ConfigureLibraryScript rejects a missing short name, an unknown
    library, and refuses to regenerate an existing shared secret."""
    script = ConfigureLibraryScript()
    library, ignore = create(
        self._db, Library, name="Library 1", short_name="L1",
    )
    library.library_registry_shared_secret = 'secret'
    self._db.commit()

    # No --short-name supplied at all.
    assert_raises_regexp(
        ValueError,
        "You must identify the library by its short name.",
        script.do_run, self._db, []
    )

    # A short name that matches no library.
    assert_raises_regexp(
        ValueError,
        "Could not locate library 'foo'",
        script.do_run, self._db, ["--short-name=foo"]
    )

    # The secret is already set; random regeneration is refused.
    assert_raises_regexp(
        ValueError,
        "Cowardly refusing to overwrite an existing shared secret with a random value.",
        script.do_run, self._db, [
            "--short-name=L1",
            "--random-library-registry-shared-secret"
        ]
    )
def create():
    """
    Create new short-link.

    Takes JSON object as an input, which should contain chat id as
    'chat_id', word as 'word' and bot token hash as 'token_hash'.

    Returns the ID of newly created link.
    """
    if not _initialized():
        return _('Not started.'), 500
    if not flask.request.is_json:
        return _('Request not in JSON format'), 400
    payload = flask.request.get_json()
    if not payload:
        return _('No valid input provided'), 401
    supplied_hash = payload.get('token_hash')
    if not supplied_hash:
        return _('No bot token hash provided'), 401
    if supplied_hash != _bot_token_hash:
        return _('Incorrect bot token hash'), 401
    new_id = model.create(_dbConnection, payload.get('chat_id'),
                          payload.get('word'))
    return str(new_id)
def lookup(balance, earnings, idno, xbal, xearn):
    """GET: render the lookup page. POST: resolve a company name to a
    ticker symbol and redirect back to the landing dashboard."""
    if request.method == 'GET':
        return render_template('lookup.html', balance=balance, earnings=earnings, idno=idno, xbal=xbal, xearn=xearn)
    else:
        idz1 = idno
        companyname = request.form['company1']
        x = model.lookup(companyname)
        # NOTE(review): errors are detected by the token "Sorry," in the
        # reply text — brittle; keep in sync with model.lookup.
        zeez = x.split()
        if "Sorry," in zeez:
            message2 = "An error occured..." + x
            return render_template('lookup.html', balance=balance, earnings=earnings, idno=idno, xbal=xbal, xearn=xearn, message2=message2)
        else:
            # Refresh the balance/earnings records for the dashboard.
            bal, earn = model.create(idz1)
            mess = "your ticker is...." + x
            return redirect(
                url_for("landing", message1=mess, balance=bal.balancez, earnings=earn.earningz, idno=idno, xbal=bal, xearn=earn))
def test_reconfigure_collection(self):
    """ConfigureCollectionScript updates an existing collection."""
    # The collection exists.
    collection, ignore = create(
        self._db, Collection, name="Collection 1",
        protocol=Collection.OVERDRIVE
    )
    script = ConfigureCollectionScript()
    output = StringIO()

    # We're going to change one value and add a new one.
    script.do_run(
        self._db, [
            "--name=Collection 1",
            "--url=foo",
            "--protocol=%s" % Collection.BIBLIOTHECA
        ],
        output
    )

    # The collection has been changed.
    eq_("foo", collection.external_integration.url)
    eq_(Collection.BIBLIOTHECA, collection.protocol)

    expect = ("Configuration settings stored.\n" + "\n".join(collection.explain()) + "\n")
    eq_(expect, output.getvalue())
def test_bad_arguments(self):
    """ConfigureCollectionScript argument-validation failures."""
    script = ConfigureCollectionScript()
    library, ignore = create(
        self._db, Library, name="Library 1", short_name="L1",
    )
    self._db.commit()

    # Reference to a nonexistent collection without the information
    # necessary to create it.
    assert_raises_regexp(
        ValueError,
        'No collection called "collection". You can create it, but you must specify a protocol.',
        script.do_run, self._db, ["--name=collection"]
    )

    # Incorrect format for the 'setting' argument.
    assert_raises_regexp(
        ValueError,
        'Incorrect format for setting: "key". Should be "key=value"',
        script.do_run, self._db, [
            "--name=collection", "--protocol=Overdrive",
            "--setting=key"
        ]
    )

    # Try to add the collection to a nonexistent library.
    assert_raises_regexp(
        ValueError,
        'No such library: "nosuchlibrary". I only know about: "L1"',
        script.do_run, self._db, [
            "--name=collection", "--protocol=Overdrive",
            "--library=nosuchlibrary"
        ]
    )
def add_spot():
    """Handle the add-spot page: validate the form and create a Spot.

    NOTE(review): the original source was whitespace-mangled; the
    nesting of the else below is reconstructed (else bound to the inner
    if) and should be confirmed against the original file. As written,
    every logged-in user is redirected away (non-admins to 'status',
    admins to 'admin') — confirm this is the intended access control.
    """
    if 'user_id' in session:
        if 'user_admin' not in session or session['user_admin'] is False:
            return redirect(url_for('status'))
        else:
            return redirect(url_for('admin'))
    if request.method == 'POST':
        data = request.form.to_dict(flat=True)
        # A location is required; re-render with the 'invalid' flag.
        if not data['location']:
            return router['add_spot'](['invalid'], data=model.garages())
        model.create(model.Spot, data)
        return router['add_spot'](['success'], data=model.garages())
    return router['add_spot']([], data=model.garages())
def test_load_integration(self):
    """ConfigureIntegrationScript._integration: lookup by id, name, or
    protocol+goal; creation when name plus protocol/goal are given."""
    m = ConfigureIntegrationScript._integration

    # No identifying information at all.
    assert_raises_regexp(
        ValueError,
        "An integration must by identified by either ID, name, or the combination of protocol and goal.",
        m, self._db, None, None, "protocol", None)

    # An ID that matches nothing.
    assert_raises_regexp(ValueError, "No integration with ID notanid.", m, self._db, "notanid", None, None, None)

    # An unknown name without the protocol/goal needed to create it.
    assert_raises_regexp(
        ValueError,
        'No integration with name "Unknown integration". To create it, you must also provide protocol and goal.',
        m, self._db, None, "Unknown integration", None, None)

    integration, ignore = create(self._db, ExternalIntegration, protocol="Protocol", goal="Goal")
    integration.name = "An integration"
    # Lookup by ID, by name, and by protocol+goal all find it.
    eq_(integration, m(self._db, integration.id, None, None, None))
    eq_(integration, m(self._db, None, integration.name, None, None))
    eq_(integration, m(self._db, None, None, "Protocol", "Goal"))

    # An integration may be created given a protocol and goal.
    integration2 = m(self._db, None, "I exist now", "Protocol", "Goal2")
    assert integration2 != integration
    eq_("Protocol", integration2.protocol)
    eq_("Goal2", integration2.goal)
    eq_("I exist now", integration2.name)
def setup(self):
    """Build the local-analytics integration and provider the tests use."""
    super(TestLocalAnalyticsProvider, self).setup()
    integration, _ = create(
        self._db,
        ExternalIntegration,
        goal=ExternalIntegration.ANALYTICS_GOAL,
        protocol="core.local_analytics_provider",
    )
    self.integration = integration
    self.la = LocalAnalyticsProvider(integration, self._default_library)
def test_create(postgresql_with_schema):
    """Test create function."""
    # Empty chat id, word, or provider are all rejected.
    with pytest.raises(ValueError):
        model.create(postgresql_with_schema, '', 'こんにちは')
    with pytest.raises(ValueError):
        model.create(postgresql_with_schema, 'dummy_chat_id', '')
    with pytest.raises(ValueError):
        model.create(postgresql_with_schema, 'dummy_chat_id', 'こんにちは', provider='')

    # Ids are sequential; re-creating the same (chat, word) pair
    # returns the existing id, a different chat gets a new one.
    assert model.create(postgresql_with_schema, 'dummy_chat_id', 'こんにちは') == 1
    assert model.create(postgresql_with_schema, 'dummy_chat_id', '世界') == 2
    assert model.create(postgresql_with_schema, 'dummy_chat_id', '世界') == 2
    assert model.create(postgresql_with_schema, 'other_dummy_chat_id', '世界') == 3
def goTrain(self):
    """Load training data, fit a new model, and optionally save it."""
    # Aliased import: the original rebound the module name `input`
    # (which also shadows the builtin) to the loaded data.
    import input as input_module
    import model
    dataset = input_module.load()
    trained = model.create(dataset)
    trained.fit(dataset)
    # `is not None` is the idiomatic form of `not ... is None`.
    if conf.path_model is not None:
        trained.save(conf.path_model)
def test_initialize(self):
    # supports multiple analytics providers, site-wide or with libraries

    # Two site-wide integrations
    mock_integration, ignore = create(
        self._db, ExternalIntegration,
        goal=ExternalIntegration.ANALYTICS_GOAL,
        protocol="mock_analytics_provider")
    mock_integration.url = self._str
    local_integration, ignore = create(
        self._db, ExternalIntegration,
        goal=ExternalIntegration.ANALYTICS_GOAL,
        protocol="local_analytics_provider")

    # A broken integration
    missing_integration, ignore = create(
        self._db, ExternalIntegration,
        goal=ExternalIntegration.ANALYTICS_GOAL,
        protocol="missing_provider")

    # Two library-specific integrations
    l1, ignore = create(self._db, Library, short_name="L1")
    l2, ignore = create(self._db, Library, short_name="L2")
    library_integration1, ignore = create(
        self._db, ExternalIntegration,
        goal=ExternalIntegration.ANALYTICS_GOAL,
        protocol="mock_analytics_provider")
    library_integration1.libraries += [l1, l2]
    library_integration2, ignore = create(
        self._db, ExternalIntegration,
        goal=ExternalIntegration.ANALYTICS_GOAL,
        protocol="mock_analytics_provider")
    library_integration2.libraries += [l2]

    analytics = Analytics(self._db)
    # Both sitewide providers were instantiated...
    eq_(2, len(analytics.sitewide_providers))
    assert isinstance(analytics.sitewide_providers[0], MockAnalyticsProvider)
    eq_(mock_integration.url, analytics.sitewide_providers[0].url)
    assert isinstance(analytics.sitewide_providers[1], LocalAnalyticsProvider)
    # ...and the broken one was recorded as an initialization failure.
    assert missing_integration.id in analytics.initialization_exceptions

    # Library-scoped providers: one for L1, two for L2.
    eq_(1, len(analytics.library_providers[l1.id]))
    assert isinstance(analytics.library_providers[l1.id][0], MockAnalyticsProvider)

    eq_(2, len(analytics.library_providers[l2.id]))
    for provider in analytics.library_providers[l2.id]:
        assert isinstance(provider, MockAnalyticsProvider)
def test_with_multiple_libraries(self):
    """ShowLibrariesScript: list all libraries, a single library, and
    optionally include the registry shared secret."""
    l1, ignore = create(
        self._db, Library, name="Library 1", short_name="L1",
    )
    l1.library_registry_shared_secret = "a"
    l2, ignore = create(
        self._db, Library, name="Library 2", short_name="L2",
    )
    l2.library_registry_shared_secret = "b"

    # The output of this script is the result of running explain()
    # on both libraries.
    output = StringIO()
    ShowLibrariesScript().do_run(self._db, output=output)
    expect_1 = "\n".join(l1.explain(include_library_registry_shared_secret=False))
    expect_2 = "\n".join(l2.explain(include_library_registry_shared_secret=False))
    eq_(expect_1 + "\n" + expect_2 + "\n", output.getvalue())

    # We can tell the script to only list a single library.
    output = StringIO()
    ShowLibrariesScript().do_run(
        self._db, cmd_args=["--short-name=L2"], output=output
    )
    eq_(expect_2 + "\n", output.getvalue())

    # We can tell the script to include the library registry
    # shared secret.
    output = StringIO()
    ShowLibrariesScript().do_run(
        self._db, cmd_args=["--show-registry-shared-secret"], output=output
    )
    expect_1 = "\n".join(l1.explain(include_library_registry_shared_secret=True))
    expect_2 = "\n".join(l2.explain(include_library_registry_shared_secret=True))
    eq_(expect_1 + "\n" + expect_2 + "\n", output.getvalue())
def _integration(self):
    """Configure a basic Vendor ID Service setup."""
    vendor_integration, _ = create(
        self._db,
        ExternalIntegration,
        protocol=ExternalIntegration.ADOBE_VENDOR_ID,
        goal=ExternalIntegration.DRM_GOAL,
    )
    # Required Adobe Vendor ID settings.
    vendor_integration.setting(
        Configuration.ADOBE_VENDOR_ID).value = "VENDORID"
    vendor_integration.setting(
        Configuration.ADOBE_VENDOR_ID_NODE_VALUE).value = self.NODE_VALUE
    return vendor_integration
def sell(balance, earnings, idno, xbal, xearn):
    """GET: render the sell page. POST: sell a ticker/volume and
    redirect back to the landing dashboard."""
    if request.method == 'GET':
        return render_template('sell.html', balance=balance, earnings=earnings, idno=idno, xbal=xbal, xearn=xearn)
    else:
        # x= model.buy(ticker_symbol, trade_volume, bal, earn, idno)
        # return x
        tickersym = request.form['tickersymz']
        tradevolume = request.form['tradevol']
        bal, earn = model.create(idno)
        x = model.sell(tickersym, tradevolume, bal, earn, idno)
        # NOTE(review): errors are detected by the token "Sorry," in the
        # reply text — brittle; keep in sync with model.sell.
        zeez = x.split()
        if "Sorry," in zeez:
            message2 = "An error occured..." + x
            return render_template('sell.html', balance=balance, earnings=earnings, idno=idno, xbal=xbal, xearn=xearn, message2=message2)
        else:
            mess = "Successfully sold ticker and volume." + x
            idz1 = captureid(idno)
            # Re-read balance/earnings after the sale completes.
            bal, earn = model.create(idz1)
            return redirect(
                url_for("landing", message1=mess, balance=bal.balancez, earnings=earn.earningz, idno=idno, xbal=bal, xearn=earn))
def main():
    """Entry point: parse args, prepare the job dir, build the
    estimator, and run the train-and-evaluate experiment."""
    args = get_args()
    _setup_logging()

    # Fixed: logging.info takes lazy %-style arguments, not print-style
    # varargs; the original `logging.info("Resume training:", x)` caused
    # a logging formatting error at runtime.
    logging.info("Resume training: %s", args.reuse_job_dir)
    if not args.reuse_job_dir:
        # If job_dir_reuse is False then remove the job_dir if it exists.
        if tf.gfile.Exists(args.job_dir):
            tf.gfile.DeleteRecursively(args.job_dir)
            logging.info("Deleted job_dir %s to avoid re-use", args.job_dir)
    else:
        logging.info("Reusing job_dir %s if it exists", args.job_dir)

    run_config = experiment.create_run_config(args)
    logging.info("Job directory: %s", run_config.model_dir)

    # Compute the number of training steps when derivable from the data
    # size; otherwise keep the explicitly supplied args.train_steps
    # (the original's `args.train_steps = args.train_steps` was a no-op).
    if args.train_size is not None and args.num_epochs is not None:
        args.train_steps = int(
            (args.train_size / args.batch_size) * args.num_epochs)

    logging.info("Train size: %s.", args.train_size)
    logging.info("Epoch count: %s.", args.num_epochs)
    logging.info("Batch size: %s.", args.batch_size)
    logging.info("Training steps: %s (%s).",
                 args.train_steps,
                 "supplied" if args.train_size is None else "computed")
    logging.info("Evaluate every %s steps.", args.eval_frequency_secs)

    # Create the estimator
    estimator = model.create(args, run_config)
    logging.info("creating an estimator: %s", type(estimator))

    # Run the train and evaluate experiment
    time_start = datetime.utcnow()
    logging.info("Experiment started...")
    logging.info(".......................................")

    # Run experiment
    experiment.run(estimator, args)

    time_end = datetime.utcnow()
    logging.info(".......................................")
    logging.info("Experiment finished.")
    time_elapsed = time_end - time_start
    logging.info("Experiment elapsed time: %s seconds",
                 time_elapsed.total_seconds())
def create(network_input, keep_prob):
    """
    Builds the convolutional network model (2x conv, 1x fully connected, softmax output)

    Returns:
        logits
    """
    features = model.create(network_input, keep_prob)

    #- output layer -#
    with tf.variable_scope('classifier') as scope:
        weights = model.weight_var([1024, NUM_CLASSES], 0.0)
        biases = model.bias_var([NUM_CLASSES])
        # do not use tf.nn.softmax here -> loss uses
        # tf.nn.sparse_softmax_cross_entropy_with_logits
        logits = tf.add(tf.matmul(features, weights), biases)
        model.variable_summaries(logits, 'network-output')
    return logits
import pylab
import tensorflow as tf
from tensorflow.models.rnn import rnn, rnn_cell
import numpy as np
import random
import json
import itertools
import manage_data
import export_to_octave
import model
import params

# Python 2 script: restores the trained 'soccer-model' checkpoint and
# dumps every TF variable to a .mat file for Octave.
parameters = params.parameters

network_model = model.create(parameters)

init = tf.initialize_all_variables()
saver = tf.train.Saver(tf.all_variables())

# Launch the graph
with tf.Session() as sess:
    saver.restore(sess, 'soccer-model')
    print "Saving vars."
    avars = tf.all_variables()
    for variable in avars:
        # '/' is illegal in file names, so flatten scoped variable names.
        export_to_octave.save(variable.name.replace('/', '_') + '.mat', 'd',
                              np.asarray(variable.eval()))
    print "All saved."
import numpy # from hyperopt import fmin, tpe, hp # space = [hp.quniform('lr', 0.00001, 1, 0.00001), # hp.quniform('bs', 100, 10000, 100), # hp.quniform('fhl', 10, 200, 10), # hp.quniform('shl', 10, 200, 10)] numpy.set_printoptions(threshold='nan') numpy.set_printoptions(precision=2) transform.transform_data ("/home/bogdan/work/repos/ml-tloe/serps/results/*", 'expanded', 10000) data = load.read_data_sets ('expanded/*',0.3,0.1, num = 00000); model.create ( H1=1, H2=50 ) # model.train (data, learning_rate=0.001, batch_size=100000, lmbda=0, ermul=10000, restore=False) model.run(data) ################################################################################ # def cost ((lr, bs, fhl, shl)): # return model.train_once (data, lr, int(bs), 0, int(fhl), int(shl), 31, 1) #(data, 0.003, 5000, 0, 150, 50, 31, 1) # best = fmin(cost, # space, # algo=tpe.suggest, # max_evals=1000) # print best
def drop_and_create_db(self):
    """Recreate the database schema from scratch on this engine."""
    engine = self.engine
    drop(engine)
    create(engine)