Code Example #1
File: DNN.py Project: GRajaraju/Deep-Learning
import numpy as np


def compute_cost(AL, Y):
    # Number of examples (labels are laid out as shape (1, m))
    m = Y.shape[1]

    # Cross-entropy cost averaged over the batch
    cost = -(1 / m) * np.sum(
        np.dot(Y, np.log(AL).T) + np.dot((1 - Y), np.log(1 - AL).T))

    # Reduce e.g. [[17]] to a scalar
    cost = np.squeeze(cost)
    assert cost.shape == ()
    return cost
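
A minimal usage sketch of the cost above (the (1, m) shapes and values for AL and Y are assumptions consistent with the formula):

AL = np.array([[0.8, 0.9, 0.4, 0.2]])  # hypothetical sigmoid outputs for 4 examples
Y = np.array([[1, 1, 0, 0]])           # hypothetical ground-truth labels
print(compute_cost(AL, Y))             # prints the mean cross-entropy, roughly 0.27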
Code Example #2
 def draw_activation_map_2d(self, imgs, label):
     self.model.eval()
     params = list(self.model.parameters())
     # Weights of the final fully-connected (softmax) layer
     weight_softmax = np.squeeze(params[-2].detach().data.numpy())
     imgs = self.transforms(imgs)
     if torch.cuda.is_available():
         imgs = imgs.cuda()
     # Forward pass populates self.features_blob via the registered hook
     _ = self.model.forward(imgs)
     CAM = self.returnCAM2d(self.features_blob, weight_softmax, label)
     self.features_blob = []
     return CAM
Code Example #3
File: tensor_utils.py Project: hbwzhsh/Destiny_RNN
import numpy as np
import tensorflow as tf  # TF1-style API (tf.Session)
from tqdm import tqdm


def generateGun(model, start, num_chars=2000, vocab_size=256):
    # Begin TF session
    with tf.Session() as sess:
        # Start with the begin token
        state = None
        new_gun = [start]
        # Generate characters, carrying the RNN state forward between steps
        for i in tqdm(range(num_chars), desc='Characters'):
            if state is not None:
                feed_dict = {
                    model['x']: [[new_gun]],
                    model['init_state']: state
                }
            else:
                feed_dict = {model['x']: [[new_gun]]}

            pred, state = sess.run([model['preds'], model['final_state']],
                                   feed_dict)
            # Sample the next character index from the predicted distribution
            sampled = np.random.choice(vocab_size, 1, p=np.squeeze(pred))[0]
            new_gun.append(sampled)

    return new_gun
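
A minimal standalone sketch of the sampling step used above (the probability vector is made up for illustration):

import numpy as np

# Hypothetical next-character distribution over a 4-symbol vocabulary
pred = np.array([[0.1, 0.6, 0.2, 0.1]])
# squeeze drops the batch dimension so np.random.choice receives a flat probability vector
next_idx = np.random.choice(4, 1, p=np.squeeze(pred))[0]
print(next_idx)  # most often 1, since that symbol carries probability 0.6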
Code Example #4
import numpy as np
import torch


def draw_activation_map_2d(imgs, labels, model, transforms):
    features_blob = []

    # Forward hook: capture the feature map of the hooked layer
    def get_activation_state(module, input, output):
        features_blob.append(output.detach().data.cpu().numpy())

    def returnCAM(feature_conv, weight_softmax, class_idx):
        # Generate one class activation map per requested class index
        bz, nc, h, w = feature_conv.shape
        for idx in class_idx:
            cam = weight_softmax[idx].dot(feature_conv.reshape((nc, h * w)))
            cam = cam.reshape(h, w)
            # Normalize to [0, 255] for visualization
            cam = cam - np.min(cam)
            cam_img = cam / np.max(cam)
            cam_img = np.uint8(255 * cam_img)
            yield cam_img

    # Check the model is adapted to activation maps by inspecting the second-to-last layer type.
    layer = check_architecture(model)

    if layer is not None:
        layer.register_forward_hook(get_activation_state)
    else:
        print("model {} architecture cannot be adapted to the activation map.".format(
            type(model).__name__))
        return None

    model.eval()
    params = list(model.parameters())
    # Weights of the final fully-connected (softmax) layer
    weight_softmax = np.squeeze(params[-2].detach().data.numpy())

    imgs = transforms(imgs)
    if torch.cuda.is_available():
        imgs = imgs.cuda()
    predict = model.forward(imgs)
    # features_blob[0] holds the conv feature map captured by the forward hook
    return returnCAM(features_blob[0], weight_softmax, labels)
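
A minimal numpy-only sketch of the weighting step that returnCAM performs (the shapes and values here are made up for illustration):

import numpy as np

# Hypothetical conv feature map: batch of 1, 3 channels, 4x4 spatial grid
feature_conv = np.random.rand(1, 3, 4, 4)
# Hypothetical fully-connected weights: 2 classes x 3 channels
weight_softmax = np.random.rand(2, 3)

bz, nc, h, w = feature_conv.shape
class_idx = 1  # class whose activation map we want
# Weighted sum of the channel maps, reshaped back to the spatial grid
cam = weight_softmax[class_idx].dot(feature_conv.reshape((nc, h * w))).reshape(h, w)
# Normalize to [0, 255] for display
cam = cam - cam.min()
cam_img = np.uint8(255 * cam / cam.max())
print(cam_img.shape)  # (4, 4)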
Code Example #5
import numpy as np
import matplotlib.pyplot as plt

# Train one model per learning rate so their cost curves can be compared
learning_rates = [0.1, 0.001]
models = {}
for i in learning_rates:
    print("learning rate is: " + str(i))
    models[str(i)] = model(train_set_x,
                           test_set_x,
                           train_set_y,
                           test_set_y,
                           iterations=1500,
                           learning_rate=i,
                           print_cost=False)
    print('\n' + "-------------------" + '\n')

# Plot each model's cost curve on the same axes, labelled by learning rate
for i in learning_rates:
    plt.plot(np.squeeze(models[str(i)]["costs"]),
             label=str(models[str(i)]["learning_rate"]))
plt.ylabel('costs')
plt.xlabel('iterations')

legend = plt.legend(loc='upper center', shadow=True)
frame = legend.get_frame()
frame.set_facecolor('0.90')
plt.show()

#my_image='/home/shruti/Downloads/train_catvnoncat.h5'

#fname="/image" +my_image
#image = np.array(ndimage.imread(fname, flatten=False))
#my_image = scipy.misc.imresize(image, size=(num_px,num_px)).reshape((1, num_px*num_px*3)).T
#my_predicted_image = pred(d["w"], d["b"], my_image)
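
The commented-out prediction snippet above relies on the long-deprecated scipy.ndimage.imread and scipy.misc.imresize; a minimal sketch of the same idea with Pillow, assuming num_px, the trained parameters d["w"] and d["b"], and the pred helper from the original example (the image path is hypothetical):

from PIL import Image
import numpy as np

# Hypothetical path; replace with an actual test image
fname = "/image/my_cat.jpg"
image = Image.open(fname).convert("RGB").resize((num_px, num_px))
# Flatten to the (num_px*num_px*3, 1) column vector the model expects
my_image = np.asarray(image).reshape((1, num_px * num_px * 3)).T
my_predicted_image = pred(d["w"], d["b"], my_image)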
Code Example #6
    # test SVRG-BB
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        x0 = np.random.rand(d)
        print('Begin to run SVRG-BB:')
        x = svrg_bb(grad,
                    1e-3,
                    n,
                    d,
                    tensor_x=par,
                    func=loss,
                    sess=sess,
                    par=par,
                    whole_data=whole_data,
                    max_epoch=50)
        # Evaluate the learned linear classifier on the held-out set
        y_predict = np.sign(np.dot(A_test, x))
        print('Test accuracy: %f' %
              (np.count_nonzero(y_test == np.squeeze(y_predict)) * 1.0 / n))

    # test SGD-BB
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        print('\nBegin to run SGD-BB:')
        x = sgd_bb(grad,
                   1e-3,
                   n,
                   d,
                   phi=lambda k: k,
                   tensor_x=par,
                   func=loss,
                   sess=sess,
                   par=par,
                   whole_data=whole_data,
                   max_epoch=50)  # assumed closing argument, mirroring the svrg_bb call above
Code Example #7
 def _compute_cost(self, AL, Y):
     # Delegate to the configured cost function and reduce the result to a scalar
     return np.squeeze(self.cost_function(AL, Y))
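
A minimal sketch of how such a method might be used, assuming a hypothetical class whose cost_function attribute holds a binary cross-entropy callable (the class name and wiring besides _compute_cost are illustrative):

import numpy as np


class TinyNet:
    def __init__(self):
        # Hypothetical cost: mean binary cross-entropy over the batch
        self.cost_function = lambda AL, Y: -np.mean(
            Y * np.log(AL) + (1 - Y) * np.log(1 - AL))

    def _compute_cost(self, AL, Y):
        return np.squeeze(self.cost_function(AL, Y))


net = TinyNet()
print(net._compute_cost(np.array([[0.8, 0.2]]), np.array([[1, 0]])))  # ~0.223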