def main():
    """Entry point: compare the two files named on the command line.

    Usage: script <file1> <file2>. Prints a usage message and returns
    early when fewer than two filenames are supplied.
    """
    # sys.argv[0] is the script name, so two file arguments mean len >= 3.
    # (The original `< 2` guard accepted a single filename and then
    # crashed with IndexError on sys.argv[2].)
    if len(sys.argv) < 3:
        print('must specify 2 files')
        print(_f('usage: {sys.argv[0]} <file1> <file2>'))
        return
    # Context managers guarantee both handles are closed even if
    # compareFiles raises (the original leaked both file objects).
    with open(sys.argv[1]) as f1, open(sys.argv[2]) as f2:
        compareFiles(f1, f2, [lambda a, b: b - a, lambda a, b: (a - b) / b])
def get_filename(self):
    """Prompt the user with an Open-file dialog and return the chosen path.

    Returns:
        The selected path string, or tkinter's empty value when the
        dialog is cancelled.
    """
    fname = filedialog.askopenfilename(title='quantify - Open')
    print(_f("Opening {fname}"))
    # Bug fix: the selected path was printed and then discarded; a
    # "get_" method must hand the value back (previously returned None,
    # so adding the return is backward-compatible).
    return fname
# Mini-batch gradient descent over one pass of the training data.
# NOTE(review): `w`, `b`, `step`, `learning_rate`, the data arrays and the
# acc/loss accumulator lists are defined outside this view — presumably this
# sits inside an outer epoch loop; confirm against the full file.
for idx in range(int(np.floor(train_size / batch_size))):
    # Slice out the idx-th mini-batch (trailing partial batch is dropped
    # by the floor division above).
    X = X_train[idx*batch_size:(idx+1)*batch_size]
    Y = Y_train[idx*batch_size:(idx+1)*batch_size]

    # Compute the gradient
    w_grad, b_grad = u._gradient(X, Y, w, b)

    # gradient descent update
    # learning rate decay with time: effective rate is lr / sqrt(step),
    # where `step` counts total updates so far (persists across epochs).
    w = w - learning_rate/np.sqrt(step) * w_grad
    b = b - learning_rate/np.sqrt(step) * b_grad
    step = step + 1

# Compute loss and accuracy of training set and development set
# (u._f is presumably the model's forward pass yielding probabilities;
# np.round turns them into hard 0/1 labels — TODO confirm in utils.)
y_train_pred = u._f(X_train, w, b)
Y_train_pred = np.round(y_train_pred)
train_acc.append(u._accuracy(Y_train_pred, Y_train))
# Loss is normalized by dataset size so train/dev values are comparable.
train_loss.append(u._cross_entropy_loss(y_train_pred, Y_train) / train_size)

y_dev_pred = u._f(X_dev, w, b)
Y_dev_pred = np.round(y_dev_pred)
dev_acc.append(u._accuracy(Y_dev_pred, Y_dev))
dev_loss.append(u._cross_entropy_loss(y_dev_pred, Y_dev) / dev_size)

print('Training loss: {}'.format(train_loss[-1]))
print('Development loss: {}'.format(dev_loss[-1]))
print('Training accuracy: {}'.format(train_acc[-1]))
print('Development accuracy: {}'.format(dev_acc[-1]))

# Plot
def window_callback(self, event):
    """Log the widget's new size on a resize event.

    Returning the string 'break' tells tkinter to stop propagating the
    event to any further bound handlers.
    """
    message = _f('{event.widget} resized ({event.width}, {event.height})')
    print(message)
    return 'break'