def predict(models, img, t=0):
    """Decode a class label from the binary outputs of an ensemble.

    The image is rescaled to [0, 255], expanded via the configured
    permutation, and scored by every sub-model.  The thresholded score
    vector is compared (Hamming distance) against the code words in
    ``rep_labels``; the matching class index is returned only on an
    exact code-word match, otherwise -1.  (``t`` is accepted for
    interface compatibility but unused.)
    """
    # Rescale to [0, 255] and expand into the per-model feature layout.
    batch = extend_data(config['permutation'], np.array([np.clip(img, 0, 1) * 255]))
    probs = np.hstack([m.predict(batch) for m in models])[0]
    #print(probs.shape)

    # Hard 0/1 code word from the raw probabilities.
    hard = np.zeros(probs.shape).astype(np.float32)
    hard[probs >= 0.5] = 1.

    # Hamming distance from the predicted code word to each class code.
    codes = rep_labels[:len(probs)].T
    stacked = np.repeat([hard], codes.shape[0], axis=0)
    dists = np.sum(np.absolute(stacked - codes), axis=-1)

    best = np.min(dists)
    candidates = np.arange(len(dists))[dists == best]

    # Tie-break by total probability mass each candidate code explains.
    cand_scores = [
        np.sum([probs[k] if codes[j][k] == 1 else 1 - probs[k]
                for k in np.arange(len(probs))])
        for j in candidates
    ]
    label = candidates[np.argmax(cand_scores)]

    # Only an exact code-word match (distance 0) yields a prediction.
    return label if best <= 0 else -1
# Track per-parameter gradient statistics (min/max added here; mean, std,
# L_2 and loss_history are assumed initialised earlier in the file).
minimum = {key: [] for key in model.params}
maximum = {key: [] for key in model.params}
for i in range(iterations):
    # Cycle through the training set in fixed-size mini-batches.
    X_batch = data[0][batch_index * batch_size:(batch_index + 1) * batch_size]
    Y_batch = data[1][batch_index * batch_size:(batch_index + 1) * batch_size]
    batch_index = (batch_index + 1) % batches
    gradients, loss = gradient_loss(model, X_batch, Y_batch)
    loss = loss.asnumpy()[0]
    loss_history.append(loss)
    # Record summary statistics of each parameter's gradient tensor.
    for key, value in zip(model.params.keys(), gradients):
        mean[key].append(np.mean(value).asnumpy())
        std[key].append(np.std(value).asnumpy())
        L_2[key].append(np.mean(value**2).asnumpy())
        minimum[key].append(np.min(value).asnumpy())
        maximum[key].append(np.max(value).asnumpy())
    updater.update(gradients)
    if (i + 1) % rescaling_interval == 0:
        rescale(mlp, data[2], model.params)  # validation data
        # Single-argument print() is valid under both Py2 and Py3,
        # matching the print() calls used elsewhere in this file.
        print('rescaled')
    if (i + 1) % interval == 0:
        print('iteration %d loss %f' % (i + 1, loss))
# Use a context manager so the dump file is closed deterministically
# instead of leaking the handle from an inline open().
with open('dr-g-norm-%d' % rescaling_interval, 'wb') as dump_file:
    pickle.dump((loss_history, mean, std, L_2, minimum, maximum), dump_file)
def red5(x):
    """Column-wise minimum of *x*, keeping the reduced axis as size 1."""
    return mp.min(x, axis=0, keepdims=True)
def red4(x):
    """Column-wise minimum of *x* (reduced axis dropped)."""
    return mp.min(x, axis=0)
def red3(x):
    """Row-wise minimum of *x*, keeping the reduced axis as size 1."""
    return mp.min(x, axis=1, keepdims=True)
def red2(x):
    """Row-wise minimum of *x* (reduced axis dropped)."""
    return mp.min(x, axis=1)
def red1(x):
    """Global minimum over all elements of *x*."""
    return mp.min(x)
print(str(feature))
dirTrain = dirTrains[0]

# Load the stored moment features for every training image;
# each row is one image's feature vector.
f_train = np.loadtxt(dirTrain + "image_train_features.txt", delimiter=' ')

# Nearest neighbour by Manhattan (L1) distance between the query
# feature and every training feature.  (The original pre-allocated F
# with np.empty and immediately overwrote it — dead code, removed.)
F = f_train - feature
F = np.abs(F)
s = np.sum(F, axis=1)
index = np.argmin(s)
m = np.min(s)
print(str(f_train[index]))
print(m)

# Fetch the name of the matched training image: line `index` (0-based)
# of the image-name list.  `with` guarantees the file is closed even if
# a read fails, unlike the manual open()/close() pair.
flag = 0
with open(dirTrain + 'image_train_list.txt', 'r') as image_train_f:
    img_name_train = image_train_f.readline().strip('\n')
    while flag < index:
        flag = flag + 1
        img_name_train = image_train_f.readline().strip('\n')
def red5(x):
    """Minimum along axis 0 of *x*, with the reduced axis kept (size 1)."""
    return mp.min(x, axis=0, keepdims=True)
def red4(x):
    """Minimum along axis 0 of *x*; the reduced axis is dropped."""
    return mp.min(x, axis=0)
def red3(x):
    """Minimum along axis 1 of *x*, with the reduced axis kept (size 1)."""
    return mp.min(x, axis=1, keepdims=True)
def red2(x):
    """Minimum along axis 1 of *x*; the reduced axis is dropped."""
    return mp.min(x, axis=1)
def red1(x):
    """Minimum over every element of *x*."""
    return mp.min(x)
def barrier(S0, K, B, tau, r, q, v, M, N):
    """Monte-Carlo price of a knock-out barrier call.

    Each simulated path contributes its terminal call payoff
    max(S_T - K, 0) only if the path's minimum stays strictly above the
    barrier B; the mean payoff is discounted back at rate r over tau.
    Path simulation is delegated to ``pricepaths``.
    """
    paths = pricepaths(S0, tau, r, q, v, M, N)
    # Boolean mask of paths that never touched the barrier.
    alive = np.min(paths, 0) > B
    terminal_payoff = np.maximum(paths[-1, :] - K, 0)
    # Knocked-out paths are zeroed by the mask before averaging.
    return math.exp(-r * tau) * np.mean(alive * terminal_payoff)