def self_model():
    feature_vocab = _build_feature_vocab(Option.src)
    from perm import TreeParamPermModel as PM
    pm = PM(feature_vocab, Option)
    logger.info('[Train] Loading the source language...:%s' % Option.src)
    Global.mx_dep = Option.mx_dep_train
    data_train = Data(Option.src, conll_reader, valid=lambda x: len(x) < Option.mx_len)
    logger.info('Training...')
    fit(pm,
        {'data': data_train, 'mx_sent': Option.mx_sent, 'batch_size': Option.batch_size},
        nb_epoch=Option.mx_epoch,
        callbacks=_callbacks(model=''))
    save(fn=Option.model, model={'model': pm.state_dict(), 'feature_vocab': feature_vocab})
def train(self):
    # collect training data
    cls_num = len(self.gestures)
    array = 0
    for n in range(int(cls_num * self.session_num)):
        label = self.gestures[n % cls_num]
        for k in range(self.training_unit):
            self.training_data[1 + array][0] = self.label_encoder.transform([label])[0]
            array += 1
    # start training
    X = self.training_data[1:, 1:]  # the first row is fake data
    Y = self.training_data[1:, 0].reshape(-1, 1)  # the first row is fake data
    self.scaler = StandardScaler().fit(X)  # normalize X
    X = self.scaler.transform(X)
    self.clf, tr_acc = util.fit(X, Y, model=self.model)
    # gui.AlertUI(msg="Training accuracy: {}".format(tr_acc))
    self.accuracy = "Training accuracy: {}".format(tr_acc)
    print(self.accuracy)
    if self.model == "SVM":
        model_dict = {
            "emg": self.emg_data[1:, :],
            "X": X,
            "Y": Y,
            "scaler": self.scaler,
            "label_enc": self.label_encoder,
            "clf": self.clf,
        }
        path = "{}/model-{}.model".format(PATH_PREFIX, self.person_id)
        # use a context manager so the file is flushed and closed
        with open(path, "wb") as f:
            f.write(pickle.dumps(model_dict))
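# Note: the util module is not shown here. Below is a minimal, hypothetical sketch of a
# fit(X, Y, model=...) helper that is consistent with the (clf, tr_acc) return value used
# above, assuming scikit-learn classifiers; the real helper may differ.
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier


def fit(X, Y, model="SVM"):
    """Train the requested classifier and report its accuracy on the training set."""
    clf = SVC(kernel="rbf") if model == "SVM" else KNeighborsClassifier()
    clf.fit(X, Y.ravel())
    tr_acc = clf.score(X, Y.ravel())
    return clf, tr_acc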
def guillotine(W, H, N, rectangles):
    rectangles = u.merge_sort(rectangles)
    if N < 1:
        g.set_draws(W, H, rectangles)
        g.draw()
        return
    aux = rectangles[0]
    aux.x = 0
    aux.y = 0
    aux.xx = aux.x + aux.w
    aux.yy = aux.y + aux.h
    actualx = aux
    actualy = aux
    cutted = []
    noncutted = []
    cutted.append(aux)
    for i in range(1, len(rectangles)):
        cut, rectangles[i] = u.fit(rectangles[i], actualx, actualy, W, H)
        if cut == 1:
            # place the piece to the right of the last one on the current strip
            rectangles[i].x = actualx.xx
            rectangles[i].y = actualy.y
            rectangles[i].xx = rectangles[i].w + rectangles[i].x
            rectangles[i].yy = rectangles[i].h + rectangles[i].y
            actualx = rectangles[i]
            cutted.append(rectangles[i])
        elif cut == 2:
            # start a new strip above the current one
            rectangles[i].x = aux.x
            rectangles[i].y = actualy.yy
            rectangles[i].xx = rectangles[i].w + rectangles[i].x
            rectangles[i].yy = rectangles[i].h + rectangles[i].y
            actualy = rectangles[i]
            actualx = rectangles[i]
            cutted.append(rectangles[i])
        elif cut == 3:
            # the piece does not fit anywhere on the sheet
            noncutted.append(rectangles[i])
            print("Piece", rectangles[i].label, "was not cut")
    for i in range(len(rectangles)):
        print(rectangles[i].label, rectangles[i].orientation)
    for i in range(len(noncutted)):
        print(noncutted[i].label)
    waste, area = uc.waste(W, H, noncutted, 1)
    do.results(1, waste, area, len(cutted), cutted)
    g.set_draws(1, W, H, cutted)
    g.draw()
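# Note: u.fit is not shown. Below is a hypothetical sketch consistent with how the cut
# codes are interpreted above (1: place to the right on the current strip, 2: start a new
# strip, 3: the piece does not fit); the real helper may also handle rotation.
def fit(rect, actualx, actualy, W, H):
    if actualx.xx + rect.w <= W and actualy.y + rect.h <= H:
        return 1, rect   # fits to the right of the last placed piece
    if rect.w <= W and actualy.yy + rect.h <= H:
        return 2, rect   # fits on a new strip above the current one
    return 3, rect       # no room left on the sheet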
def permute():
    if not Option.model:
        logger.info('[Warning] No model file will be saved since --model isn\'t specified!')
    logger.info('[Target] Loading the target language...:%s' % Option.tgt)
    tgt_data = Data(Option.tgt, conll_reader if Option.tgt.endswith('.conllu') else text_reader)
    lm_tgt = _load_lm('Target', tgt_data)
    from perm import UParamPermModel as PM
    if Option.pretrain:
        logger.info('Loading the pretrained model...:%s' % Option.pretrain)
        ptr_pm = load(Option.pretrain)
        feature_vocab = ptr_pm['feature_vocab']
        pm = PM(feature_vocab, Option, lm_tgt,
                smoother=lm_tgt.smoother, lambdap=lm_tgt.lambdap, vocab_size=lm_tgt.vocab_size)
        pm.load_state_dict(ptr_pm['model'])
    else:
        feature_vocab = _build_feature_vocab(Option.src)
        pm = PM(feature_vocab, Option, lm_tgt,
                smoother=lm_tgt.smoother, lambdap=lm_tgt.lambdap, vocab_size=lm_tgt.vocab_size)
    logger.info('[Train_before] Loading the source language...:%s' % Option.src)
    Global.mx_dep = Option.mx_dep_train
    Global.feature_vocab = feature_vocab
    data_train = Data(Option.src, conll_reader, valid=lambda x: len(x) < Option.mx_len)
    lm_src_train = _load_lm('Train', pm.filter(data_train))
    logger.info('[Train_before] Objective: %f' % pm.skewD(lm_tgt, lm_src_train))
    logger.info('Training...')
    fit(pm,
        {'data': data_train, 'mx_itr': Option.mx_itr, 'batch_size': Option.batch_size},
        nb_epoch=Option.mx_epoch,
        callbacks=_callbacks())
    save_model(Option.model, Option.to_dict(), Global.to_dict())
    Global.mx_dep = Option.mx_dep
    logger.info('[Test_before] Loading the source language...:%s' % Option.src)
    data_test = Data(Option.src, conll_reader)
    lm_data_test = _load_lm('Test_before', data_test)
    logger.info('[Test_before] Skew-Divergence: %f' % pm.skewD(lm_tgt, lm_data_test))
    logger.info('[Test] Permuting&writing...:%s' % Option.output)
    write_conll(Option.output, pm.permute(data_test))
    lm_data_test_out = _load_lm('Test_after', Data(Option.output, conll_reader))
    logger.info('[Test_after] Skew-Divergence: %f' % pm.skewD(lm_tgt, lm_data_test_out))
def params_changed(self):
    # print(self.p.min(), self.p.max())
    self.s = util.fit(self.s_slider.value(), 0, self.sliders_positions, self.s_min, self.s_max)
    self.r = util.fit(self.r_slider.value(), 0, self.sliders_positions, self.r_min, self.r_max)
    self.b = util.fit(self.b_slider.value(), 0, self.sliders_positions, self.b_min, self.b_max)
    print(self.dt_slider.value(), '$$$$$$')
    self.dt = util.fit(self.dt_slider.value(), 0, self.sliders_positions, self.dt_min, self.dt_max)
    lorenz.lorenz(self.p, self.s, self.r, self.b, self.steps, self.dt)
    # self.main_scatter_plot.setData(pos=self.p, size=np.ones(self.p.shape[0])*0.3, color=self.color, pxMode=False)
    self.main_scatter_plot.setData(pos=self.p, size=0.3, color=self.color, pxMode=False)
    self.xcurve.setData(self.p[:, 0], connect='finite')
    self.ycurve.setData(self.p[:, 1], connect='finite')
    self.zcurve.setData(self.p[:, 2], connect='finite')
    # print(self.steps, self.dt, f'min = {np.min(self.p)} max = {np.max(self.p)}')
    # self.xplot.setYRange(np.min(self.p[:, 0]), np.max(self.p[:, 0]))
    # self.yplot.setYRange(np.min(self.p[:, 1]), np.max(self.p[:, 0]))
    # self.zplot.setYRange(np.min(self.p[:, 2]), np.max(self.p[:, 1]))
    print(self.dt, '<<<<')
    # print(self.steps)
    self.steps_label.setText(f'steps {self.steps}')
    self.dt_label.setText(f'dt {self.dt}')
    self.s_label.setText(f'σ {self.s}')
    self.r_label.setText(f'ρ {self.r}')
    self.b_label.setText(f'β {self.b}')
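# Note: util.fit is not shown. Judging from the five-argument calls above and in init_ui,
# it appears to be a linear range remap between slider positions and parameter values.
# A minimal sketch under that assumption; the real helper may clamp or round differently.
def fit(value, in_min, in_max, out_min, out_max):
    """Linearly remap value from [in_min, in_max] to [out_min, out_max]."""
    return out_min + (value - in_min) * (out_max - out_min) / (in_max - in_min)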
# pick 14 indexes at random from 0 to 29
# cast to set to avoid repetition
indeces = rd(14)
L = len(indeces)
# since points i and i+1 are used, the index list must be walked in pairs;
# if the list length is odd, the last element is ignored
# initialize scores and losses tensors
scores = torch.zeros(1, L // 2)
loss = torch.zeros(1, L // 2)
for i in range(0, L - 1, 2):  # stop before L so an odd trailing index is skipped
    x1, y1, x2, y2 = x[indeces[i]], y[indeces[i]], x[indeces[i + 1]], y[indeces[i + 1]]
    # fit a line to the two points
    m, q = fit(x1, x2, y1, y2)
    # now "count" the inliers
    # number_inliers = sum(inliers) would be a perfectly valid RANSAC score;
    # here we just build a vector of ones and zeros and let the net regress a score from it
    inliers = torch.FloatTensor([m * x[a] + q - y[a] == 0 for a in range(30)])
    # the score is some function of the inliers
    scores[0][i // 2] = net(inliers)
    # the (quadratic) loss against the ground-truth line parameters
    loss[0][i // 2] = (m - gm) ** 2 + (q - gq) ** 2
# finally compute the expected loss of this example
scores_n = torch.softmax(scores[0, :], dim=0)
exp_loss = loss[0, :].dot(scores_n)
# add up the losses of the batch
LOSS += exp_loss
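# Note: the fit helper used above is not shown. Below is a minimal sketch consistent with
# the call fit(x1, x2, y1, y2) returning the slope and intercept of the line through the
# two sampled points (assumes x1 != x2); the actual implementation may differ.
def fit(x1, x2, y1, y2):
    m = (y2 - y1) / (x2 - x1)  # slope
    q = y1 - m * x1            # intercept
    return m, q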
import torch
import torch.nn as nn
import torch.utils.data as tud
import torch.optim as optim

import util
import model

BATCH_SIZE = 32
EPOCHS = 10
LEARNING_RATE = 0.001

train_dataset, test_dataset = util.load_data()
train_loader = tud.DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=4)
test_loader = tud.DataLoader(test_dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=4)

model = model.CNN()
print('[main]: ', model)

optimizer = optim.Adam(model.parameters(), lr=LEARNING_RATE)
error = nn.CrossEntropyLoss()

util.fit(model, train_loader=train_loader, test_loader=test_loader,
         epochs=EPOCHS, batch_size=BATCH_SIZE, error=error, optimizer=optimizer)
# util.save_model(model, 'model_b_32_e_10')
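# Note: util.fit is not shown. Below is a minimal, hypothetical training-loop sketch that
# matches the keyword arguments used above (batch_size is already baked into the loaders);
# the real helper may track more metrics or save checkpoints.
import torch


def fit(model, train_loader, test_loader, epochs, batch_size, error, optimizer):
    for epoch in range(epochs):
        # one pass over the training data
        model.train()
        for images, labels in train_loader:
            optimizer.zero_grad()
            loss = error(model(images), labels)
            loss.backward()
            optimizer.step()
        # evaluate on the test set after every epoch
        model.eval()
        correct, total = 0, 0
        with torch.no_grad():
            for images, labels in test_loader:
                preds = model(images).argmax(dim=1)
                correct += (preds == labels).sum().item()
                total += labels.size(0)
        print(f'epoch {epoch + 1}/{epochs}: test accuracy {correct / total:.3f}')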
# print(sqrt((0.00005*0.5/0.025)**2 + (0.008*0.001/0.025)**2 + (0.008*0.5/0.025**2*0.001)**2))
Vs = unumpy.uarray([0.001, 0.002, 0.003, 0.004], [0.00003, 0.00003, 0.00003, 0.00003])
V = ufloat(0.01, 0.0003)
C = Vs * C_figlia / V
print(C)

# Absorbances
A = [m16.A[n_max], m32.A[n_max], m48.A[n_max], m64.A[n_max]]


# Fit
def retta(x, m):
    # straight line through the origin
    return m * x


print(util.fit(unumpy.nominal_values(C), A, 11.6 * unumpy.std_devs(C)))
m, cov = scipy.optimize.curve_fit(retta, unumpy.nominal_values(C), A,
                                  sigma=(11.6 * unumpy.std_devs(C)) ** -2)
print(m, cov)
chi2 = util.chi2_retta(m, 0, unumpy.nominal_values(C), A, 11.6 * unumpy.std_devs(C))
print("chi2:", chi2)
# rescale the uncertainties by sqrt(chi2/dof) so the reduced chi-square becomes 1
j = sqrt(chi2 / 3)
print(j)
old_C = C
u = np.sqrt((11.6 * unumpy.std_devs(C) * j) ** 2 - (11.6 * unumpy.std_devs(C)) ** 2)
print(u)
C = unumpy.uarray(unumpy.nominal_values(C), unumpy.std_devs(C) * j)
m, cov = scipy.optimize.curve_fit(retta, unumpy.nominal_values(C), A,
                                  sigma=(11.6 * unumpy.std_devs(C)) ** -2)
def init_ui(self):
    # pg.setConfigOption('background', 'w')
    # pg.setConfigOption('imageAxisOrder', 'row-major')
    pg.setConfigOption('background', 'w')
    pg.setConfigOption('foreground', 'k')
    self.setWindowTitle('Lorenz Attractor')

    # self.win = pg.GraphicsWindow(title='A 2d plot window')
    self.w = gl.GLViewWidget()
    self.w.opts['distance'] = 100
    # self.w.opts['viewport'] = (0, 0, 400, 400)
    # self.w.setWindowTitle('pyqtgraph example: GLLine`em')
    # self.w.setGeometry(0, 110, 700, 700)
    # self.w.setMaximumHeight(100)

    grid_shift = 100
    grid_spacing = 25
    gx = gl.GLGridItem()
    gx.setSize(grid_shift, grid_shift, grid_shift)
    gx.rotate(90, 0, 1, 0)
    gx.translate(-grid_shift / 2, 0, grid_shift / 2)
    gx.setSpacing(grid_spacing, grid_spacing, grid_spacing)
    self.w.addItem(gx)
    gy = gl.GLGridItem()
    gy.setSize(grid_shift, grid_shift, grid_shift)
    gy.rotate(90, 1, 0, 0)
    gy.translate(0, -grid_shift / 2, grid_shift / 2)
    gy.setSpacing(grid_spacing, grid_spacing, grid_spacing)
    self.w.addItem(gy)
    gz = gl.GLGridItem()
    gz.setSize(grid_shift, grid_shift, grid_shift)
    gz.setSpacing(grid_spacing, grid_spacing, grid_spacing)
    # gz.translate(0, 0, -grid_shift/2)
    self.w.addItem(gz)

    # self.color = (1, 0, 0, 1)
    self.main_scatter_plot = gl.GLScatterPlotItem()
    self.w.addItem(self.main_scatter_plot)

    projections_height = 80
    projections_width = 300
    self.xplot = pg.PlotWidget()
    self.xplot.showGrid(x=True, y=True, alpha=0.1)
    # self.xplot.enableAutoRange()
    self.xcurve = self.xplot.plot(pen='b')
    self.xplot.setFixedSize(projections_width, projections_height)
    self.yplot = pg.PlotWidget()
    self.yplot.showGrid(x=True, y=True, alpha=0.1)
    # self.yplot.enableAutoRange()
    self.ycurve = self.yplot.plot(pen='b')
    self.yplot.setFixedSize(projections_width, projections_height)
    self.zplot = pg.PlotWidget()
    self.zplot.showGrid(x=True, y=True, alpha=0.1)
    # self.zplot.enableAutoRange()
    self.zcurve = self.zplot.plot(pen='b')
    self.zplot.setFixedSize(projections_width, projections_height)

    self.projections_1d_layout = QtGui.QVBoxLayout()
    # layoutgb = QtGui.QGridLayout()
    self.layout = QtGui.QHBoxLayout()
    self.right_layout = QtGui.QVBoxLayout()
    self.left_layout = QtGui.QVBoxLayout()

    self.steps_label = QtGui.QLabel()
    self.dt_label = QtGui.QLabel(str(self.dt))
    self.s_label = QtGui.QLabel()
    self.r_label = QtGui.QLabel()
    self.b_label = QtGui.QLabel()
    self.sliders_positions = 2000
    self.s_label.setStyleSheet('background-color: rgba(255, 0, 0, 0.2)')

    self.steps_slider = QtGui.QSlider(orientation=QtCore.Qt.Horizontal)
    self.steps_slider.setValue(self.steps)
    self.steps_slider.setRange(self.steps_min, self.steps_max)
    self.steps_slider.setTickInterval(1000)

    self.dt_slider = QtGui.QSlider(orientation=QtCore.Qt.Horizontal)
    print(int(util.fit(self.dt, self.dt_min, self.dt_max, 0, self.sliders_positions)))
    self.dt_slider.setValue(
        int(util.fit(self.dt, self.dt_min, self.dt_max, 0, self.sliders_positions)))
    self.dt_slider.setRange(0, self.sliders_positions)

    self.s_slider = QtGui.QSlider(orientation=QtCore.Qt.Horizontal)
    self.s_slider.setRange(0, self.sliders_positions)
    self.s_slider.setValue(
        util.fit(self.s, self.s_min, self.s_max, 0, self.sliders_positions))
    # self.s_slider.setTickPosition(QtGui.QSlider.TicksBelow)
    # self.s_slider.setTickInterval(0.1)

    self.r_slider = QtGui.QSlider(orientation=QtCore.Qt.Horizontal)
    self.r_slider.setRange(0, self.sliders_positions)
    self.r_slider.setValue(
        util.fit(self.r, self.r_min, self.r_max, 0, self.sliders_positions))
    # self.r_slider.setTickInterval(0.1)

    self.b_slider = QtGui.QSlider(orientation=QtCore.Qt.Horizontal)
    self.b_slider.setRange(0, self.sliders_positions)
    self.b_slider.setValue(
        util.fit(self.b, self.b_min, self.b_max, 0, self.sliders_positions))
    # self.b_slider.setTickInterval(0.01)

    self.color = (1, 0.7, 0.4, 1)
    # self.color = np.array([
    #     (1*x, 0.2+0.5*x, 0.1+0.3*x, 1)
    #     for x in np.linspace(0, 1, self.steps)
    # ])

    self.lambda_label = QtGui.QLabel()
    self.calc_lambdas_button = QtGui.QPushButton('calc lambdas')
    # pos=self.p, size=np.ones(self.p.shape[0])*0.3, color=self.color, pxMode=False

    sliders_width = 300
    self.steps_slider.setFixedWidth(sliders_width)
    self.dt_slider.setFixedWidth(sliders_width)
    self.s_slider.setFixedWidth(sliders_width)
    self.r_slider.setFixedWidth(sliders_width)
    self.b_slider.setFixedWidth(sliders_width)

    self.right_layout.addWidget(self.steps_slider)
    self.right_layout.addWidget(self.steps_label)
    self.right_layout.addWidget(self.dt_slider)
    self.right_layout.addWidget(self.dt_label)
    self.right_layout.addWidget(self.s_label)
    self.right_layout.addWidget(self.s_slider)
    self.right_layout.addWidget(self.r_label)
    self.right_layout.addWidget(self.r_slider)
    self.right_layout.addWidget(self.b_label)
    self.right_layout.addWidget(self.b_slider)
    self.right_layout.addWidget(self.lambda_label)
    self.right_layout.addWidget(self.calc_lambdas_button)
    self.right_layout.addWidget(self.xplot)
    self.right_layout.addWidget(self.yplot)
    self.right_layout.addWidget(self.zplot)

    self.params_changed()

    self.left_layout.addWidget(self.w)
    self.left_layout.addLayout(self.projections_1d_layout, stretch=2)
    self.layout.addLayout(self.left_layout)
    self.layout.addLayout(self.right_layout)
    self.setLayout(self.layout)
    self.setGeometry(0, 0, 1440, 900)