def value(self):
    pir = self.pir.value
    isActive = getData(self.topicIsActive)
    steal = getData(self.topicSteal)
    return (pir, isActive, steal, self.topicIsActive, self.topicSteal)

def value(self):
    humidity, temperature = Adafruit_DHT.read_retry(self.dht11, self.pin)
    valueHumidity = getData(self.topicHum)
    valueTemperature = getData(self.topicTem)
    topicHumidity = self.topicHum
    topicTemperature = self.topicTem
    return (humidity, temperature, valueHumidity, valueTemperature,
            topicHumidity, topicTemperature)

def startPir(self):
    previousState = getData(self.topicSteal)
    while True:
        actualState = self.pir.motion_detected
        if actualState != previousState:
            if actualState and getData(self.topicIsActive):
                self.connectionMQTT.addPublished(self.topicSteal, True)
            if not getData(self.topicIsActive):
                self.connectionMQTT.addPublished(self.topicSteal, False)
            previousState = actualState
        sleep(0.2)

def j_init():
    stemmer = SnowballStemmer("english")
    word_list = getWord()
    word_list = normalize_words(word_list)
    weight = dict()
    for word in word_list:
        weight[word] = 0
    haaretz_list, israel_hayom_list = getData()
    # cleaning out bad words.
    haaretz_list = cleanse_raw(haaretz_list)
    israel_hayom_list = cleanse_raw(israel_hayom_list)
    # validation set: the last 20% of each list
    h_validate = haaretz_list[int((len(haaretz_list) / 10) * 8):]
    i_validate = israel_hayom_list[int((len(israel_hayom_list) / 10) * 8):]
    # train set: the first 80%
    h_train = haaretz_list[:int((len(haaretz_list) / 10) * 8)]
    i_train = israel_hayom_list[:int((len(israel_hayom_list) / 10) * 8)]
    # label Israel Hayom +1 and Haaretz -1.
    counter_for_dictionary(i_train, 1, stemmer)
    counter_for_dictionary(h_train, -1, stemmer)
    normalize_weights()
    ilen, hlen = avarage_length(i_train, h_train)
    mids = (ilen + hlen) / 2

def test():
    # model = AlexNet(num_classes=257)
    # model.load_state_dict(torch.load("output/weight_saved/Alexnet-9-1.5478705212275188-62.533333251953124.pt"))
    # model = resnet50(num_classes=257)
    # model.load_state_dict(torch.load("output/weight_saved/Resnet50-9-0.6717463796933492-83.76666666666667.pt"))
    model = wide_resnet101_2(num_classes=257)
    model.load_state_dict(torch.load(
        "output/weight_saved/WideResnet101-9-0.8427028404871623-80.26666666666667.pt"))
    if cuda_:
        model.cuda()
    testData = getData("test")
    test_loader = torch.utils.data.DataLoader(testData, batch_size=16,
                                              shuffle=False, num_workers=4)
    criterion = torch.nn.CrossEntropyLoss()
    _, test_acc = val_run(test_loader, model, criterion, device=cuda_)
    print(test_acc)
    # AlexNet 64.04059933133415

def monthlyPayoffTimingAllVintages():
    import matplotlib.ticker as t
    import data as s
    import pathlib
    df = s.getData()
    s.addCalulatedFields(df)
    freddie_path = pathlib.Path(OUTPUT_PATH)
    fig, axes = plt.subplots(2, 1)
    fig.set_size_inches(7, 9)
    d1 = monthlyPayoffTiming(df, axes[0], yr=1999, dollar=False)  # , msa=10900
    d2 = monthlyPayoffTiming(df, axes[1], yr="all", dollar=True)
    axes[0].yaxis.set_major_formatter(t.FormatStrFormatter('%2.0f%%'))
    axes[0].set_title(axes[0].get_title(), fontsize=10)
    axes[0].set_xlabel('')
    axes[0].set_ylabel('Percentage')
    axes[1].set_title(axes[1].get_title(), fontsize=10)
    axes[1].set_xlabel('Age')
    axes[1].set_ylabel(r'\$MM')  # raw string: escape $ so mathtext does not trigger
    fig.suptitle("Payoff By Termination Age")
    plt.show()
    return

def start(self):
    value = getData(self.topic)
    if value == self.minAngle:
        self.closeDoor()
    else:
        self.openDoor()
    self.connection.addPublished(self.topic, value)

def __init__(self):
    QMainWindow.__init__(self)
    plot = QwtPlot()
    self.setCentralWidget(plot)
    d = data.getData('table.csv')
    xs = np.arange(len(d))
    ps = np.array([float(i[6]) for i in d])
    ts = np.array([float(i[5]) for i in d])
    c2 = VolumeCurve()
    c2.setData(xs, ps, ts)
    c2.attach(plot)
    c1 = PriceCurve()
    c1.setData(xs, ps)
    c1.attach(plot)
    self.selector = Selector(plot)
    self.zoomer = Zoomer(plot)
    self.pickers = [self.selector, self.zoomer]
    self.select_picker(self.zoomer)
    self.panner = QwtPlotPanner(plot.canvas())
    self.panner.setMouseButton(Qt.MidButton)
    self.createActions()
    self.showMaximized()

def trust():
    users = getData()
    cutOff = 0.03
    trustMat = []
    for user1 in users:
        for user2 in users:
            if user1 == user2:
                continue
            cnt = 0
            mo = {}
            for (m1, r1) in users[user1]:
                mo[m1] = 1
            for (m2, r2) in users[user2]:
                if m2 in mo:  # dict.has_key() was removed in Python 3
                    cnt += 1
            if (cnt / len(users[user1]) >= cutOff
                    and cnt / len(users[user2]) >= cutOff):
                trustMat.append([user1, user2])
    return users, np.asarray(trustMat)

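# A minimal sketch (not from the source) of the same overlap test written with
# set intersection; assuming each user rates a movie at most once, it returns
# the same two fractions that trust() compares against cutOff.
def overlap_fractions(ratings1, ratings2):
    items1 = {m for (m, r) in ratings1}
    items2 = {m for (m, r) in ratings2}
    common = len(items1 & items2)
    return common / len(ratings1), common / len(ratings2)
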
def setObservation():
    global observation, loc, city
    data.readData()
    city = data.getData("weather_city")
    loc = city + ",SE"
    observation = owm.three_hours_forecast(loc)

def ReadValue(self, options): databyte = [] string = data.getData("rgb_single").split(",") databyte.append(dbus.Byte(int(string[0]))) databyte.append(dbus.Byte(int(string[1]))) databyte.append(dbus.Byte(int(string[2]))) return databyte
def main():
    # parse()
    configDict = getConfigs()
    modelPath = configDict['modelPath']
    model = pm.loadModel(modelPath)
    dataPath = configDict['dataPath']
    label = configDict['dataLabel']
    dataFormat = configDict['dataFormat']
    classification = configDict['classification']
    data = d.getData(dataPath, label)
    # print(data['train'][0])
    # pm.printModelParams(model)
    logging.basicConfig(level=logging.DEBUG,
                        format='%(asctime)s - %(levelname)s - %(message)s')
    logging.debug('This is a log message.')
    space = pm.parameterSpace("../input/parameter_space.json")
    X = data['train'][0]
    y = data['train'][1]
    X_test = data['test'][0]
    y_test = data['test'][1]
    if classification == 'multiclass':
        y = LabelBinarizer().fit_transform(y)
        y_test = LabelBinarizer().fit_transform(y_test)
    y = y.apply(lambda x: 0 if x == 2 else x)  # for binary classification
    y_test = y_test.apply(lambda x: 0 if x == 2 else x)  # for binary classification
    search = om.Search(model, space, X, y, X_test, y_test)
    # search.run()
    search.inf_search()
    # print(search.space)
    print('End\n')

def __init__(self, data, flaglt):
    wx.Frame.__init__(self, None, -1, "Analysis", size=(200, 200))
    self.data = data
    self.client = getData(flaglt)
    # print data
    menubar = wx.MenuBar()
    followMenu = wx.Menu()  # follower menu: gender/region/tag charts and top 10
    sexitem0 = followMenu.Append(-1, u'性别图', 'about the sex')
    localitem0 = followMenu.Append(-1, u'区域图', 'about the city')
    tagitem0 = followMenu.Append(-1, u'标签图', 'about the tag')
    topitem0 = followMenu.Append(-1, u'Top10', 'top10 follows')
    menubar.Append(followMenu, u'&粉丝')  # "Followers"
    friendMenu = wx.Menu()
    sexitem1 = friendMenu.Append(-1, u'性别图', 'about the sex')
    localitem1 = friendMenu.Append(-1, u'区域图', 'about the city')
    tagitem1 = friendMenu.Append(-1, u'标签图', 'about the tag')
    topitem1 = friendMenu.Append(-1, u'Top10', 'top10 follows')
    menubar.Append(friendMenu, u'&关注')  # "Following"
    weiboMenu = wx.Menu()
    statusitem = weiboMenu.Append(-1, u'影响力', 'about his weibo')  # influence
    topicitem = weiboMenu.Append(-1, u'关注面', 'what he cares about')  # interests
    menubar.Append(weiboMenu, u'微博')  # "Weibo"
    backMenu = wx.Menu()
    menubar.Append(backMenu, u'返回')  # "Back"
    exititem = backMenu.Append(-1, u'返回搜索', 'back to search')
    self.SetMenuBar(menubar)
    self.Bind(wx.EVT_MENU, self.Sex0, sexitem0)
    self.Bind(wx.EVT_MENU, self.Local0, localitem0)
    self.Bind(wx.EVT_MENU, self.Tag0, tagitem0)
    self.Bind(wx.EVT_MENU, self.Top0, topitem0)
    self.Bind(wx.EVT_MENU, self.Sex1, sexitem1)
    self.Bind(wx.EVT_MENU, self.Local1, localitem1)
    self.Bind(wx.EVT_MENU, self.Tag1, tagitem1)
    self.Bind(wx.EVT_MENU, self.Top1, topitem1)
    self.Bind(wx.EVT_MENU, self.Influence, statusitem)
    self.Bind(wx.EVT_MENU, self.careTopic, topicitem)
    self.Bind(wx.EVT_MENU, self.OnBack, exititem)
    mastersizer = wx.BoxSizer(wx.VERTICAL)
    topsizer = wx.BoxSizer(wx.HORIZONTAL)
    middlesizer = wx.BoxSizer(wx.VERTICAL)
    self.createPhoto(topsizer)
    self.createTitle(topsizer)
    self.createText(middlesizer)
    mastersizer.Add(topsizer)
    mastersizer.Add(middlesizer)
    self.SetSizer(mastersizer)

def statecases():
    state = request.form['state']
    _, total, newcases, death = data.getData(state)
    return render("states.html", state=state, total=total,
                  newcases=newcases, death=death)

def main():
    image_sets = data.getData()
    for images in image_sets[0:2]:
        slides, score = createSlides(images)
    return 0

def start(self):
    value = getData(self.topic)
    print(value)
    if value:
        self.turnOn()
    else:
        self.turnOff()
    print('value', value)
    self.connection.addPublished(self.topic, value)

def getNumDataRoute():
    try:
        numRows = int(request.args.get('numRows'))
    except (TypeError, ValueError):  # missing or non-numeric parameter
        return jsonify(
            "getDataRoute Error: numRows query parameter must be a valid integer")
    dataList = getData(numRows)
    return jsonify(dataList), status.HTTP_200_OK

def ReadValue(self, options): databyte = [] string = data.getData("rgb_flash_sequence").split(":") for s in string: string2 = s.split(",") databyte.append(dbus.Byte(int(string2[0]))) databyte.append(dbus.Byte(int(string2[1]))) databyte.append(dbus.Byte(int(string2[2]))) return databyte
def callback(self, topic, value):
    state = getData(topic)
    if value != state and isinstance(value, int):
        if value == self.minAngle:
            self.closeDoor()
            setData(topic, value)
        elif value == self.maxAngle:
            self.openDoor()
            setData(topic, value)

def main(inargs=None):
    args, unk = parseArgs(inargs)
    command = args.command.lower()
    # my_print(args.path)
    SESSION = getConnection(args.path)
    if command == 'getdata':
        try:
            getData(SESSION)
            my_print("Updated the Database!")
        except Exception as e:
            SESSION.rollback()
            my_print(e)
            my_print("Rolled the database back")
    elif command == 'getdistances':
        getDistances(SESSION, args.user)
    elif command == 'extractstuff':
        extractStuff(SESSION)
    elif command == 'getWalkBoxPlot'.lower():
        getWalkBoxPlot(SESSION)
    elif command == 'getDriveBoxPlot'.lower():
        getDriveBoxPlot(SESSION)
    elif command == 'getTravelBoxPlot'.lower():
        getTravelBoxPlot(SESSION)
    elif command == 'getDamageStats'.lower():
        getDamageStats(SESSION)
    elif command == 'getHeadShotStats'.lower():
        getHeadShotStats(SESSION)
    elif command == 'getKillsStats'.lower():
        getKillsStats(SESSION)
    elif command == 'makeTable'.lower():
        makeTable(SESSION, args.user, args.columns, args.limit)
    else:
        my_print("invalid command {}".format(command))

def main():
    args = setup_args()
    outfile = args.out_dir + args.comment
    f_out = open(outfile, 'w')
    # Write meta-info about the particular run into the master file before each run
    timestr = time.strftime("%Y%m%d-%H%M%S")
    f = open(master_meta_info_file, 'a+')
    f.write(timestr + " #### " + args.comment + " ##### " + str(args) + "\n")
    f.close()
    hole_feature_filename = args.out_dir + "hole_features_" + args.comment
    dataset = getData(args.hole_window_size,
                      args.num_files * args.num_of_holes_per_file,
                      args.dataset_type, args.sup_window_size,
                      args.num_sup_tokens, args.num_of_holes_per_file,
                      args.sup_def, args.method)
    # Get the size of the vocabulary
    vocab_size, encoder = get_vocab_size()
    model = Seq2SeqModel(vocab_size, bias_init=None)
    if args.load_model:
        y = tf.reshape(tf.Variable(1, dtype=tf.int32), (1, 1))
        model(y, y, False)
        model.load_weights(args.model_load_dir).expect_partial()  # to suppress warnings
        print("Loaded Weights from: ", args.model_load_dir)
    size = args.num_files * args.num_of_holes_per_file
    bar = tqdm(total=size)
    print("Evaluating " + args.dataset_type + " Data.......")
    subword_loss, token_loss, error, hole_features = evaluate(
        model, dataset, args.method, bar, args.inner_learning_rate,
        args.sup_batch_size, args.num_of_updates)
    bar.close()
    print(args.dataset_type + " Statistics..........")
    f_out.write(args.dataset_type + " Statistics..........")
    print("Token Cross-Entropy = {:.4f} ".format(token_loss))
    print("{:.4f} confidence error over mean cross-entropy = {:.4f}".format(
        CONFIDENCE_INTERVAL, error))
    f_out.write("Token Cross-Entropy = {:.4f} ".format(token_loss))
    f_out.write("{:.4f} confidence error over mean cross-entropy = {:.4f}".format(
        CONFIDENCE_INTERVAL, error))
    f_out.flush()
    with open(hole_feature_filename, 'wb') as f:
        pickle.dump(hole_features, f)

def ReadValue(self, options): string = data.getData("rgb_flash_delay") value = int(string) part_1 = (value >> 8) & 0xFF part_2 = value & 0xFF return [dbus.Byte(part_1), dbus.Byte(part_2)]
def menu(menu):
    data = getData()
    d = data["menus"][menu]
    return render_template('menu.html',
                           menus=data["menus"],
                           menu_key=menu,
                           menu_title=d["menu_title"],
                           summary=d["summary"],
                           items=d["items"],
                           images=d["images"])

def prepare_data_own(self):
    self.german_vocab, self.english_vocab, train_data, valid_data, test_data = getData(
        LOAD_NEW_METHOD)
    self.train_iterator, self.valid_iterator, self.test_iterator = Batcher(
        train_data, valid_data, test_data)
    self.src_vocab_size = len(self.german_vocab)
    self.trg_vocab_size = len(self.english_vocab)
    # note: both pad indices are looked up in the English vocab here
    self.src_pad_idx = self.english_vocab.stoi["<pad>"]
    self.pad_idx = self.english_vocab.stoi["<pad>"]

def post(self):
    include = ('Aliases,AssociatedWith,CollaboratorWith,Contemporaries,'
               'Followers,GroupMembers,Influencers,MemberOf,Similars,'
               'Images,Moods,MusicBio,MusicStyles,Themes,Web')
    nodeList = ['associatedWith', 'collaboratorWith', 'contemporaries',
                'followers', 'groupMembers', 'influencers', 'similars']
    query = self.request.get('query')
    params = {'endpoint': 'music', 'entitytype': 'artist', 'query': query}
    json = data.getData(params=params, include=include)
    json = data.treeFormat(json, nodeList)
    template = JINJA_ENVIRONMENT.get_template('view.html')
    self.response.write(template.render({'data': json}))

def OnSearch(self, event):
    string = self.basicText.GetValue()
    self.basicText.Clear()
    flaglt = [False, False, False]
    self.client = getData(flaglt)
    data = self.client.searchUser(string)
    frame1 = main.My_Frame1(data, flaglt)
    frame1.Show()
    frame1.Center()
    self.Destroy()

def main():
    train, train_cls, test, test_cls = data.getData()
    train_cls = shirink_class(train_cls)
    test_cls = shirink_class(test_cls)
    feat2id, sent2feat, feat2weights = generate_features(
        train, train_cls, {}, {})
    feat2id, sent2feat, feat2weights = generate_features(
        test, test_cls, feat2id, sent2feat)
    print("total feature numbers:", len(feat2id))
    training(train, train_cls, test, test_cls, feat2id, sent2feat, feat2weights)

def ReadValue(self, options): string = data.getData("autosleep_time") string = string.split(",") time1 = string[0].split(":") time2 = string[1].split(":") return [ dbus.Byte(int(time1[0])), dbus.Byte(int(time1[1])), dbus.Byte(int(time2[0])), dbus.Byte(int(time2[1])) ]
def callback(self, topic, value):
    state = getData(topic)
    if value != state and isinstance(value, bool):
        if value:
            self.turnOn()
            setData(topic, value)
        else:
            self.turnOff()
            setData(topic, value)
        print('value of', self.value())

def getScoreOfUri(uri):
    if uri not in dataCache:
        dataCache[uri] = getData(uri)
    X_train, X_test, y_train, y_test = dataCache[uri]
    if uri not in modelCache:
        modelCache[uri] = getModel(X_train, y_train)
    model = modelCache[uri]
    return model.score(X_test, y_test)

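# Usage sketch (the URI is hypothetical): repeated calls hit dataCache and
# modelCache, so the train/test split and the model fit each happen once per URI.
first = getScoreOfUri("datasets/example.csv")   # loads data, fits, caches
again = getScoreOfUri("datasets/example.csv")   # served from the caches
assert first == again
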
def edit_expense(id):
    data_i = data.getData(id)
    form = expenseForm(request.form)
    form.info.data = data_i['info']
    form.category.data = data_i['category']
    form.amount.data = data_i['amount']
    if request.method == 'POST' and form.validate():
        info = request.form['info']
        category = int(request.form['category'])
        amount = float(request.form['amount'])
        data.changeData(id, info, category, amount)
        flash('Expense updated', 'success')
        return redirect(url_for('home'))
    return render_template('add_expense.html', form=form, title='Edit')

def getSorted(cls, data, attrOrder):
    if not attrOrder:
        data.sort()
        return data
    sortDict = {}
    nextAttr = attrOrder[0]  # start with the first attribute
    attrList = data.getAttributeSet(nextAttr)
    attrList.sort()
    newData = []
    for attrVal in attrList:
        subset = data.getData(attributes={nextAttr: attrVal})
        sortedSubset = cls.getSorted(subset, attrOrder[1:])
        newData.extend(sortedSubset)
    return newData

def create_train_test_csvs(self):
    self.log('Creating train/test CSVs...')
    X, Y, dictActivities = data.getData(self.dataset_select)
    x_train, x_test, y_train, y_test = train_test_split(
        X, Y, shuffle=False, train_size=300, random_state=seed)
    x_test, x_validation, y_test, y_validation = train_test_split(
        x_test, y_test, shuffle=False, test_size=400, random_state=seed)
    if os.path.isfile('CSVs/annotations.csv'):
        self.log_warn('[WARNING] Annotations file is present. '
                      'Annotations will be appended to the training set.')
        annotations = pd.read_csv('CSVs/annotations.csv', skiprows=1, header=None)
        y_annotations = annotations.iloc[:, -1:]
        x_annotations = annotations.drop(annotations.columns[-1], axis=1)
        x_train = pd.DataFrame(x_train)
        x_train = pd.concat([x_train, x_annotations])
        x_train = x_train.values
        y_train = pd.DataFrame(y_train)
        y_annotations = y_annotations.rename(columns={2000: 0})
        y_train = pd.concat([y_train, y_annotations])
        y_train = y_train.values
    x_test = pd.DataFrame(x_test)
    y_test = pd.DataFrame(y_test)
    x_validation = pd.DataFrame(x_validation)
    y_validation = pd.DataFrame(y_validation)
    x_train_write = pd.DataFrame(x_train)
    y_train_write = pd.DataFrame(y_train)
    x_train_write.to_csv('CSVs/x_train.csv', index=False, header=False)
    y_train_write.to_csv('CSVs/y_train.csv', index=False, header=False)
    x_test.to_csv('CSVs/x_test.csv', index=False, header=False)
    y_test.to_csv('CSVs/y_test.csv', index=False, header=False)
    x_validation.to_csv('CSVs/x_validation.csv', index=False, header=False)
    y_validation.to_csv('CSVs/y_validation.csv', index=False, header=False)
    y_train = y_train.astype('int')
    return x_train, x_test, x_validation, y_train, y_test, y_validation, dictActivities

def predicting(self, dates):
    """Make a prediction based on the given dates."""
    datas = [0] * len(dates)
    result = []
    # query all the possible data from quandl
    data = stock.getData(self.ticker, 'default', 'default', "default", "default")
    # for all the given dates, grab open, high, and volume and predict with
    # the regressor
    for i, date in enumerate(dates):
        temp = data.loc[date]  # .ix was removed from pandas; .loc is the label-based lookup
        datas[i] = [temp[0], temp[1], temp[4]]
        result.append(temp[5])
    predicts = self.clf.predict(datas)
    self.pred_result = predicts  # store all predictions
    self.act_result = result  # store the actual results for comparison
    return predicts

def plot_tree_layout():
    # take the 2nd and 3rd features, 3 classes
    # pair = [2, 3]
    # n_classes = 3
    # plot_colors = "ryb"
    # plot_step = 0.02
    # X = iris.data[:, pair]
    # y = iris.target
    n_classes = 3
    plot_colors = "rb"
    plot_step = 0.02
    # >>> clf = tree.DecisionTreeRegressor()
    # >>> clf = clf.fit(X, y)
    # >>> clf.predict([[1, 1]])
    # pass in the features and labels
    # X, y = getData()
    # clf = DecisionTreeRegressor().fit(X, y)
    # clf.predict([to_predict])
    # pass in the features and labels
    X, y = getData()
    clf = DecisionTreeClassifier().fit(X, y)
    # prepare the map (a grid over the feature plane)
    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
                         np.arange(y_min, y_max, plot_step))
    # classify the map
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    cs = plt.contourf(xx, yy, Z, cmap=plt.cm.RdYlBu)
    # draw everything
    for i, color in zip(range(n_classes), plot_colors):
        idx = np.where(y == i)
        plt.scatter(X[idx, 0], X[idx, 1], c=color,
                    label=iris.target_names[i], cmap=plt.cm.RdYlBu,
                    edgecolor='black', s=80)
    plt.show()

# Fragment: the opening of the gradient-accumulation function and its
# per-example loop are not shown; the loop body and epilogue follow.
        DeltaTwo += d3.T * (secLayer[i, :])
        b = np.asmatrix(X[i, :])
        DeltaOne += d2 * b
    m = float(m)
    DeltaOne /= m
    DeltaTwo /= m
    DeltaOne[0:, 1:] += thetaO[0:, 1:] * lam / m
    DeltaTwo[0:, 1:] += thetaT[0:, 1:] * lam / m
    return DeltaOne, DeltaTwo


thetaO, thetaT, X, Y = getData()


def flatToWeight(weights, weightDim):
    # first matrix: the leading x_o * x_t entries, reshaped row-major
    x_o, x_t = weightDim[0], weightDim[1]
    thetaO = np.reshape(weights[0:(x_o * x_t)], (x_o, x_t), order="C")
    # second matrix: everything after the first matrix's entries
    x_o, x_t = weightDim[2], weightDim[3]
    thetaT = np.reshape(weights[weightDim[0] * weightDim[1]:], (x_o, x_t),
                        order="C")
    return thetaO, thetaT


def getGradient(weights, X, Y, weightDim, lam):
    thetaO, thetaT = flatToWeight(weights, weightDim)
    lam = 1

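# Minimal sketch (not in the source) of the flattening that flatToWeight
# undoes: both matrices are raveled row-major ("C" order, matching the
# reshapes above) and concatenated into the 1-D vector an optimizer expects.
def weightToFlat(thetaO, thetaT):
    return np.concatenate([np.asarray(thetaO).ravel(order="C"),
                           np.asarray(thetaT).ravel(order="C")])
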
def catering():
    data = getData()
    return render_template('catering.html',
                           summary=data["catering"]["summary"],
                           items=data["catering"]["items"])

def events():
    data = getData()
    return render_template('events.html',
                           summary=data["events"]["summary"],
                           items=data["events"]["items"])

def service():
    data = getData()
    return render_template('service.html', items=data["service"]["items"])

def update():
    patch = json.loads(request.data)
    setData(updateData(patch, getData()))
    return json.dumps(getData())