def save_task():
    t = Task(request.form['title'], request.form['notes'])
    model.add(t)
    model.save_all()
    print(request.form)
    return "Save a task"
def save_task():
    print(request.form['task_description'])
    newtask = Task(request.form['task_description'])
    model.add(newtask)
    model.save_all()
    print(newtask)
    return "Thank you! Your task has been saved on the to-do list."
def get(self):
    y = yql.Public()
    query = 'select * from xml where url="http://www.espncricinfo.com/rss/content/story/feeds/0.xml"'
    res = y.execute(query)
    # last notification
    tweet = "A small Status"
    #tweet = res.rows[0]['channel']['item'][0]['description']
    # fetch from GAE datastore
    fetched = model.fetch()
    if not fetched:
        model.add('INITIALIZATION')
        self.response.write('INIT')
    else:
        fetched = str(list(r.tweet for r in fetched)[0])
        self.response.write(' Fetching ')
        if tweet == fetched:
            self.response.write(' Same Tweet ')
        else:
            model.add(tweet)
            if len(tweet) > 140:
                a, b = split(tweet)
                update(a, b)
                self.response.write(' Updated & Added ')
            else:
                update(tweet, None)
                self.response.write(' Updated & Added ')
def save_task():
    task = request.form['task']
    notes = request.form['notes']
    t = Task(task, notes)
    model.add(t)
    model.save_all()
    return render_template("success.html", task=task, notes=notes)
def build(params, batch_size=None):
    """
    Build the LSTM according to the parameters passed. The general
    architecture is set in the code.

    :param batch_size: If this param is not None, it overrides the value set
        in the parameters dictionary. This is useful when building a network
        to make 1-step predictions.
    """
    # Always use the batch_size value passed to the method. If not set,
    # copy it from the params.
    if batch_size is None:
        batch_size = params['lstm_batch_size']

    # Build the LSTM.
    model = Sequential()

    # Check if the design has more than one layer.
    ret_seq_flag = False
    if params['lstm_numlayers'] > 1:
        ret_seq_flag = True

    # Add input layer.
    print('Adding layer #{:d} [{:d}]'
          .format(1, params['lstm_layer{:d}'.format(1)]))
    model.add(LSTM(
        params['lstm_layer1'],
        input_shape=(params['lstm_timesteps'], params['num_features']),
        stateful=params['lstm_stateful'],
        unit_forget_bias=params['lstm_forget_bias'],
        unroll=params['lstm_unroll'],
        batch_input_shape=(batch_size, params['lstm_timesteps'],
                           params['num_features']),
        return_sequences=ret_seq_flag))
    model.add(Dropout(params['lstm_dropout1']))

    # Add additional hidden layers.
    for layer in range(1, params['lstm_numlayers']):
        if (layer + 1) == params['lstm_numlayers']:
            ret_seq_flag = False
        print('Adding layer #{:d} [{:d}]'.format(
            layer + 1, params['lstm_layer{:d}'.format(layer + 1)]))
        model.add(LSTM(
            params['lstm_layer{:d}'.format(layer + 1)],
            input_shape=(params['lstm_timesteps'], params['num_features']),
            stateful=params['lstm_stateful'],
            unit_forget_bias=params['lstm_forget_bias'],
            unroll=params['lstm_unroll'],
            batch_input_shape=(batch_size, params['lstm_timesteps'],
                               params['num_features']),
            return_sequences=ret_seq_flag))
        model.add(Dropout(params['lstm_dropout{:d}'.format(layer + 1)]))

    # Output layer.
    model.add(Dense(units=1, input_dim=params['lstm_layer{:d}'.format(
        params['lstm_numlayers'])]))
    #model.add(Activation('linear'))

    model.compile(
        loss=params['lstm_loss'], optimizer=params['lstm_optimizer'])
    return model
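# Hedged usage sketch for the build() helper above (not part of the original
# snippet): it shows how the params keys read by build() could be filled in,
# and how batch_size=1 can override the training batch size for 1-step
# predictions. All concrete values below are illustrative assumptions.
example_params = {
    'lstm_batch_size': 32,
    'lstm_numlayers': 1,
    'lstm_layer1': 64,
    'lstm_timesteps': 10,
    'num_features': 4,
    'lstm_stateful': False,
    'lstm_forget_bias': True,
    'lstm_unroll': False,
    'lstm_dropout1': 0.2,
    'lstm_loss': 'mse',
    'lstm_optimizer': 'adam',
}
training_model = build(example_params)                  # batch size taken from params
prediction_model = build(example_params, batch_size=1)  # batch size overridden for 1-step predictions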
def update(self):
    if random.random() <= 0.2:
        model.add(Ball(self._x, self._y))
        self.change_dimension(2, 2)
    else:
        self.change_dimension(-1, -1)
        if self.get_dimension() == (0, 0):
            model.remove(self)
def update(self):
    eat = Hunter.update(self)
    if eat:
        daughter_ameba = Special(self._x, self._y)
        daughter_ameba.set_dimension(self._width, self._height)
        daughter_ameba.set_angle(self._angle + math.pi)
        model.add(daughter_ameba)
def bullet(self):
    self._counter += 1
    if self._counter == 80:
        x = Hunter(self._x, self._y)
        x.radius = 3
        x.color = 'white'
        model.add(x)
        self._counter = 0
def save_task():
    new_task = Task(request.form['title'])  # string from dict
    notes = request.form['notes']  # string from dict
    new_task.notes = notes  # object new_task with attribute notes
    model.add(new_task)
    model.save_all()
    # return "Saved, theoretically"
    return redirect(url_for("home"))
def test_add_duplicate():
    assert True == model.add({'id': '0', 'msg': 'test-0 first'})
    assert False == model.add({'id': '0', 'msg': 'test-0 second'})
    (tasks, success) = model.getTasks()
    assert [{'id': '0', 'msg': 'test-0 first'}] == sorted(tasks, key=itemgetter('id'))
    assert success == True
def upload_plants(file_name):
    # Grab the json string from data.txt
    f = open(file_name)
    for line in f:
        line = line.strip()
        info = line.split("|")
        entry = [info[0], info[1].split(","), info[2].split(",")]
        print(entry)
        plant = Plants(*entry)
        model.add(plant)
    model.save_all()
    f.close()
def test_add_multiple():
    assert True == model.add({'id': '0', 'msg': 'test-0'})
    assert True == model.add({'id': '1', 'msg': 'test-1'})
    (tasks, success) = model.getTasks()
    assert [{'id': '0', 'msg': 'test-0'},
            {'id': '1', 'msg': 'test-1'}] == sorted(tasks, key=itemgetter('id'))
    assert success == True
def mouse_click(x, y):
    global objs, select
    obj_rem = None
    if select == 'Remove':
        for ob in objs:
            if ob.contains((x, y)):
                obj_rem = ob
        model.remove(obj_rem)
    else:
        exec('global sim\nsim = ' + select + str((x, y)))
        #print(sim)
        model.add(sim)
def test_add():
    game = json.dumps({
        'ID': None,
        'kind': 'game',
        'name': 'Game4',
        'company': 'Company1',
        'minPlayers': 1,
        'maxPlayers': 4,
        'age': 10,
        'length': 30,
        'link': 'www.example.com',
        'image': None,
        'notes': 'Fun!'
    })
    mini = json.dumps({
        'ID': None,
        'kind': 'mini',
        'name': 'Mini4',
        'army': 'Orcs and Goblins',
        'type': 'core',
        'system': 'WFB',
        'company': 'Company1',
        'quantity': 10,
        'status': 'painted',
        'link': 'www.example.com',
        'image': None,
        'notes': 'Fun!'
    })
    paint = json.dumps({
        'ID': None,
        'kind': 'paint',
        'name': 'Paint4',
        'color': 'green',
        'type': 'matte',
        'company': 'Company1',
        'quantity': 1,
        'link': 'www.example.com',
        'notes': 'Fun!'
    })
    assert model.add(game) == '1'
    assert model.view(game) == '[4, "game", "Game4", "Company1", 1, \
4, 10, 30, "www.example.com", null, "Fun!"]'
    assert model.add(mini) == '1'
    assert model.view(mini) == '[4, "mini", "Mini4", \
"Orcs and Goblins", "core", "WFB", "Company1", 10, "painted", \
"www.example.com", null, "Fun!"]'
    assert model.add(paint) == '1'
    assert model.view(paint) == '[4, "paint", "Paint4", "green", \
def operations_menu():
    print("1. Addition \n 2. Subtract \n 3. Multiply(*) \n 4. Divide(/) \n 5. Power(**) \n 6. Modulo(%)")
    option = int(input("Enter the number of operation you'd like to do: "))
    if option == 1:
        a = int(input("Enter the first number: "))
        b = int(input("Enter the second number: "))
        res = add(a, b)
    elif option == 2:
        a = int(input("Enter the first number: "))
        b = int(input("Enter the second number: "))
        res = subtract(a, b)
    elif option == 3:
        a = int(input("Enter the first number: "))
        b = int(input("Enter the second number: "))
        res = multiply(a, b)
    elif option == 4:
        a = int(input("Enter the first number: "))
        b = int(input("Enter the second number: "))
        res = divide(a, b)
    elif option == 5:
        a = int(input("Enter the first number: "))
        b = int(input("Enter the second number: "))
        res = power(a, b)
    elif option == 6:
        a = int(input("Enter the first number: "))
        b = int(input("Enter the second number: "))
        res = residue(a, b)
    return res
def add(item: CartItem, repo: AbstractRepository, session) -> str:
    products = repo.list()
    if not is_valid_sku(item.sku, products):
        raise InvalidSku(f"Invalid sku {item.sku}")
    productref = model.add(item, products)
    session.commit()
    return productref
def test_update():
    assert True == model.add({'id': '0', 'msg': 'test-0'})
    model.update({'id': '0', 'msg': 'test-0 updated', 'msg2': 'New message'})
    assert ([{'id': '0', 'msg': 'test-0 updated', 'msg2': 'New message'}], True) == model.getTasks()
def update(self, p):
    to_eat = Black_Hole.update(self, p)
    eaten = set()
    for obj in to_eat:
        if not isinstance(obj, Chain_Link):
            eoc = self.end_of_chain
            chain_x, chain_y = eoc.get_location()
            chain_angle = eoc.get_angle()
            new_x = chain_x - eoc.radius * cos(chain_angle)
            new_y = chain_y - eoc.radius * sin(chain_angle)
            new_link = Chain_Link(new_x, new_y, self.end_of_chain)
            new_link.change_location(-new_link.radius, -new_link.radius)
            self.end_of_chain = new_link
            model.add(new_link)
            eaten.add(obj)
    self.move()
    return eaten
def add_item():
    if not session.get('logged_in'):
        abort(401)
    try:
        progress = float(request.form['progress'])
        if progress < 0:
            progress = 0
        if progress > 1:
            progress = 1
    except ValueError:
        flash("Invalid value for progress")
        return redirect(url_for('root'))
    try:
        model.add(db_items(), request.form['name'], progress, request.form['description'])
    except model.DataError:
        flash("Failed to add item")
        return redirect(url_for('root'))
    return redirect(url_for('root'))
def test():
    # some data
    X_train, X_test, y_train, y_test, index_train, index_test = dutil.load_titanic()
    X_train = X_train.astype(numpy.float64)
    y_train = y_train.reshape(1, y_train.shape[0])[0].astype(numpy.int32)
    X_test = X_test.astype(numpy.float64)
    y_test = y_test.reshape(1, y_test.shape[0])[0].astype(numpy.int32)
    # train
    model = Classification()
    model.add(dense.DenseLayer(7, 20, name="hiddenLayer"))
    model.add(dense.DenseLayer(20, 2, name="outputLayer", W_init=defa,
                               activation=softmax, learning_rate=0.001))
    model.fit(X_train, X_test, y_train, y_test)
import model
import layer
import optimizers
import pickle
import util
import numpy
import matplotlib.pyplot as plt

train_set, val_set, test_set = pickle.load(open("mnist.pkl", "rb"), encoding='latin1')

model = model.Sequence()
model.add(layer.Dense(300, input_dim=28 * 28, activation="Relu"))
#model.add(layer.Dense(300, activation="Relu"))
model.add(layer.Dense(10))

train_y = util.to_categorical(train_set[1])
idx = numpy.random.choice(train_set[0].shape[0], 50000)
train_set = train_set[0][idx]
train_y = train_y[idx]

model.init()
model.fit(input_data=train_set, output_data=train_y, epoch=500, batch_num=10)
model.compile(optimizer=optimizers.SGD(model, 0.1), loss="Mean_squared_error")
model.train()

id = 0
rightnum = 0
for now in val_set[0]:
    # plt.imshow(numpy.reshape(now,(28,28)))
    # plt.show()
if (epoch + 1) % 100 == 0:
    generator.save_weights("Generator{}.h5".format(epoch))
    discriminator.save_weights("Discriminator_weights{}.h5".format(epoch))
    model.save_weights("Model{}.h5".format(epoch))

from google.colab.patches import cv2_imshow

path = "/content/drive/MyDrive/cars_train/07336.jpg"
X = cv2.imread(path)
X = cv2.resize(X, (24, 24))
X = np.reshape(X, (1, 24, 24, 3))
X_batch = tf.cast(X, tf.float32)
Y = generator(X_batch)
cv2_imshow(X[0])
cv2_imshow(Y[0].numpy())

generator().summary()
discriminator().summary()

model = tf.keras.models.Sequential()
model.add(generator())
model.add(discriminator())
model.summary()

discriminator().compile(loss="binary_crossentropy", optimizer="rmsprop")
discriminator().trainable = False
model.compile(loss="binary_crossentropy", optimizer="rmsprop")

train_dcgan(model, epochs=2200)
def test_raises_sku_do_not_match_exception_if_cannot_add():
    cart_item = CartItem("item1", "SMALL-FORK", 10)
    with pytest.raises(StocknotMatch, match="SMALL-FORK"):
        add(Product("SMALL-FORK1", "SMALL FORK", 1, 'SOME BRAND2', 2), [cart_item])
def test_raises_out_of_stock_exception_if_cannot_add():
    cart_item = CartItem("item1", "SMALL-FORK", 10)
    with pytest.raises(OutOfStock, match="SMALL-FORK"):
        add(Product("SMALL-FORK1", "SMALL FORK", 11, 'SOME BRAND2', 2), [cart_item])
def lyse(self):
    model.things = set([i for i in model.things if i != self])
    for i in range(self._burst_size):
        model.add(Phage(self.get_location()[0], self.get_location()[1]))
def save_task():
    title = request.form['task_title']
    t = Task(title)
    model.add(t)
    model.save_all()
    return redirect(url_for("home"))
def train():
    datasplit_path = "./gc_mc_master/gcmc/data/dec/complete/"
    fold = 1
    N_EPOCHS = 300
    BATCH_SIZE = 100

    u_features, v_features, responses, trn_instance_idx, val_instance_idx, test_instance_idx, class_values, \
        true_class, rating_mx_train, train_labels, train_u_indices, train_v_indices \
        = dataUtil.loadData(DATASET, FEATURES, DATASEED, TESTING, datasplit_path, SPLITFROMFILE, VERBOSE, fold)

    # transform
    num_classes = len(class_values)
    num_oracles = u_features.shape[0]
    dim_instance = v_features.shape[1]

    # matrix completion on the train dataset
    #response_complete = matrix_completion_GCN(u_features, v_features, rating_mx_train, train_labels, train_u_indices, train_v_indices, class_values, trn_instance_idx)
    response_complete = matrix_completion_NMF(rating_mx_train, train_labels, train_u_indices, train_v_indices, class_values, trn_instance_idx)

    # for fold
    x_train = v_features[trn_instance_idx].toarray()
    y_train = true_class[trn_instance_idx]
    x_test = v_features[test_instance_idx].toarray()
    y_test = true_class[test_instance_idx]

    # sparse + iwmv
    predictY, weight, id_conf = IWMV(responses[trn_instance_idx], class_values, dict())
    acc_iwmv_trn = accuracy_score(y_train, predictY)
    acc_iwmv_per_cls = accuracy_per_class(y_train, predictY)
    #print("iwmv train acc/F1/acc per cls: ", acc_iwmv_trn, acc_iwmv_per_cls)
    model0 = build_base_model(dim_instance, num_classes)
    y_imwv_trn = np.zeros((len(x_train), 2))
    for i in range(len(predictY)):
        y_imwv_trn[i][int(predictY[i])] = 1
    y_imwv_tst = np.zeros((len(y_test), 2))
    for i in range(len(y_test)):
        y_imwv_tst[i][int(y_test[i])] = 1
    model0.fit(x_train, y_imwv_trn, epochs=60)
    accuracy_trn0 = eval_model(model0, x_train, y_train)
    accuracy_tst0 = eval_model(model0, x_test, y_test)
    print('iwmv train acc/F1/acc per cls: ', accuracy_trn0, 'test acc/F1/acc per cls:', accuracy_tst0)

    # complete + iwmv
    predictY, weight, id_conf = IWMV(response_complete, class_values, dict())
    acc_iwmv_trn = accuracy_score(y_train, predictY)
    acc_iwmv_per_cls = accuracy_per_class(y_train, predictY)
    #print("iwmv train acc/F1/acc per cls: ", acc_iwmv_trn, acc_iwmv_per_cls)
    model0 = build_base_model(dim_instance, num_classes)
    y_imwv_trn = np.zeros((len(x_train), 2))
    for i in range(len(predictY)):
        y_imwv_trn[i][int(predictY[i])] = 1
    y_imwv_tst = np.zeros((len(y_test), 2))
    for i in range(len(y_test)):
        y_imwv_tst[i][int(y_test[i])] = 1
    model0.fit(x_train, y_imwv_trn, epochs=60)
    accuracy_trn0 = eval_model(model0, x_train, y_train)
    accuracy_tst0 = eval_model(model0, x_test, y_test)
    print('iwmv-complete train acc/F1/acc per cls: ', accuracy_trn0, 'test acc/F1/acc per cls:', accuracy_tst0)

    # prediction block (bottleneck layer)
    model = build_base_model(dim_instance, num_classes)
    model2 = build_base_model(dim_instance, num_classes)

    # crowd layer: add crowds layer on top of the base model
    model.add(CrowdsClassification(num_classes, num_oracles, conn_type="MW"))   # sparse + crowd layer
    model2.add(CrowdsClassification(num_classes, num_oracles, conn_type="MW"))  # complete + crowd layer

    # instantiate specialized masked loss to handle missing answers
    loss = MaskedMultiCrossEntropy().loss
    loss2 = MaskedMultiCrossEntropy().loss

    # compile model with masked loss and train
    model.compile(optimizer='adam', loss=loss)
    model.fit(x_train, responses[trn_instance_idx], epochs=N_EPOCHS, shuffle=True, batch_size=BATCH_SIZE, verbose=2)
    model2.compile(optimizer='adam', loss=loss2)
    model2.fit(x_train, response_complete, epochs=N_EPOCHS, shuffle=True, batch_size=BATCH_SIZE, verbose=2)

    # save weights from crowds layer for later
    #weights = model.layers[4].get_weights()

    # remove crowds layer before making predictions
    model.pop()
    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
    model2.pop()
    model2.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

    accuracy_trn = eval_model(model, x_train, y_train)
    accuracy_test = eval_model(model, x_test, y_test)
    print('Train acc/F1/acc per cls: ', accuracy_trn)
    print('Test acc/F1/acc per cls: ', accuracy_test)

    accuracy_trn = eval_model(model2, x_train, y_train)
    accuracy_test = eval_model(model2, x_test, y_test)
    print('Train2 acc/F1/acc per cls: ', accuracy_trn)
    print('Test2 acc/F1/acc per cls: ', accuracy_test)
def test_add():
    assert True == model.add({'id': '0', 'msg': 'test-0'})
    assert ([{'id': '0', 'msg': 'test-0'}], True) == model.getTasks()
def make_resnet_model():
    model = Sequential()
    model.add(Input(shape=(300, 300, 3), name='input_layer'))
    model.add(ZeroPadding2D(padding=(3, 3)))
    model.add(Conv2D(32, (10, 10), strides=2, kernel_initializer='he_normal'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(ZeroPadding2D(padding=(1, 1)))
    model.add(MaxPooling2D((2, 2), strides=1, padding='same'))
    model.add(Conv2D(32, (1, 1), strides=1, padding='valid', kernel_initializer='he_normal'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Conv2D(32, (3, 3), strides=1, padding='same', kernel_initializer='he_normal'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    # model.add(MaxPooling2D((2, 2), strides=1, padding='same'))
    model.add(Conv2D(32, (1, 1), strides=2, padding='valid', kernel_initializer='he_normal'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Conv2D(32, (3, 3), strides=1, padding='same', kernel_initializer='he_normal'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Conv2D(32, (3, 3), strides=1, padding='valid', kernel_initializer='he_normal'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    # model.add(MaxPooling2D((2, 2), strides=1, padding='same'))
    # model.add(Conv2D(8, (1, 1), strides=1, padding='same', activation='relu', kernel_initializer='he_normal'))
    # model.add(Flatten())
    # model.add(Dense(8, activation='relu'))
    # model.add(Dropout(0.5))
    model.add(GlobalAveragePooling2D())
    model.add(Dense(3, activation='softmax', name='output_layer'))
    model.summary()
    return model
def test_delete_missing():
    assert True == model.add({'id': '0', 'msg': 'test-0'})
    model.delete('1')
    assert ([{'id': '0', 'msg': 'test-0'}], True) == model.getTasks()
import model
import layer
import optimizers
import numpy
import pickle

model = model.Sequence()
model.add(layer.Dense(1, input_dim=2))
model.add(layer.Dense(2))
model.add(layer.Dense(5))

w = numpy.array([[1], [9]])
w2 = numpy.array([[5, 4]])
w3 = numpy.array([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]])

text_x = numpy.random.randn(1000, 2)
text_y = numpy.dot(text_x, w)
text_y = numpy.dot(text_y, w2)
text_y = numpy.dot(text_y, w3)

model.init()
model.fit(text_x, text_y, epoch=10000, batch_num=100)
model.compile(loss="Mean_squared_error", optimizer=optimizers.SGD(model, speed=0.000001))
model.train()

t = ""
isfirst = True
for now in model.now_model:
    print(now.w)
x = numpy.empty((len(games), m.input_dim()))
ywin = numpy.empty((len(games), 1))
yscore = numpy.empty((len(games), 2))
for i in range(len(games)):
    g = games[i]
    (year, week, date) = g.game_time()
    (road_team_id, home_team_id) = g.teams()
    m.set_input_data(year, week, date, road_team_id, home_team_id, x, i)
    ywin[i, 0] = g.target_data_win()
    yscore[i, 0] = g.score()[0]
    yscore[i, 1] = g.score()[1]

model = tensorflow.keras.models.Sequential()
model.add(tensorflow.keras.layers.BatchNormalization(input_shape=(m.input_dim(),)))
model.add(tensorflow.keras.layers.Dense(m.neurons()[0], activation='relu'))
model.add(tensorflow.keras.layers.Dense(1, activation='sigmoid'))
model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['binary_accuracy'])
model.fit(x, ywin, epochs=m.epochs()[0], batch_size=1024)
model.save(m.name() + ".win.h5")
del model

model = tensorflow.keras.models.Sequential()
model.add(tensorflow.keras.layers.BatchNormalization(input_shape=(m.input_dim(),)))
model.add(tensorflow.keras.layers.Dense(m.neurons()[1], activation='relu'))
model.add(tensorflow.keras.layers.Dense(2, activation='linear'))
model.compile(optimizer='nadam', loss='mean_squared_error')
model.fit(x, yscore, epochs=m.epochs()[1], batch_size=1024)
model.save(m.name() + ".score.h5")
del model
import model

m1 = model.mod()
m1.prin()
print(model.add(1, 3))
def save_task(): t = m.Task(request.form["title"], notes=request.form["notes"]) m.add(t) m.save_all() return redirect("/")
def model():
    # This is our LSTM model. We have used Keras layers here. The loss
    # function is mean squared error and the optimizer is Adam.
    mod = Sequential()
    mod.add(LSTM(units=64, return_sequences=True, input_shape=(X_train.shape[1], 9)))
    mod.add(Dropout(0.2))
    mod.add(BatchNormalization())
    mod.add(LSTM(units=64, return_sequences=True))
    mod.add(Dropout(0.1))
    mod.add(BatchNormalization())
    mod.add(LSTM(units=64))
    mod.add(Dropout(0.1))
    mod.add(BatchNormalization())
    mod.add(Dense(units=16, activation='tanh'))
    mod.add(BatchNormalization())
    mod.add(Dense(units=4, activation='tanh'))
    mod.compile(loss='mean_squared_error', optimizer='adam',
                metrics=['accuracy', 'mean_squared_error'])
    mod.summary()
    return mod
def model_arch():
    model = Sequential()
    model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer=keras.optimizers.Adadelta(),
                  metrics=['accuracy'])
    return model
def launch_parasite(self, b):
    if b:
        model.add(Parasite(self._x, self._y, self.target1, self))
    else:
        model.add(Parasite(self._x, self._y, self.target2, self))
from dense import DenseLayer
import model
import numpy as np

if __name__ == "__main__":
    model = model.Model()
    x = np.array([[1, 1], [1, 0], [0, 1], [0, 0]])
    y = np.array([[0], [1], [1], [0]])
    model.add(DenseLayer((2, 2), 'relu'))
    model.add(DenseLayer((2, 4), 'relu'))
    model.add(DenseLayer((4, 1), 'sigmoid'))
    model.compile("mse")
    model.fit(x, y, 0.1, 4, 2000)
    #model.printm()
    print(model.predict(np.array([[1, 1], [0, 1], [1, 0], [0, 0]])))
# coding=utf-8
import model

models = model.add(11, 22)
print(models)