Code example #1
import cv2
import numpy as np
# CONST (paths and constants) and the tic()/toc() timing helpers are
# project-level utilities shared by all of the examples below.

def LoadImages(X):
    tic()
    Images = []
    Labels = []
    for img_name in X:
        if img_name.endswith(".tif"):
            # Flag -1 loads each file unchanged, keeping its original bit depth.
            img = cv2.imread(CONST.PATH + 'IMAGES/' + img_name, -1)
            label = cv2.imread(CONST.PATH + 'LABELS2/' + img_name, -1)

            # Add a leading batch axis; concatenate once after the loop
            # instead of growing the arrays inside it.
            Images.append(np.expand_dims(img, axis=0))
            Labels.append(np.expand_dims(label, axis=0))
        else:
            print('Skipping non-TIFF file:', img_name)

    Images = np.concatenate(Images, axis=0).astype(np.float32)
    Labels = np.concatenate(Labels, axis=0).astype(np.float32)
    toc("Images Loaded.")
    return Images / 255, Labels
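
A minimal usage sketch, assuming CONST.PATH points at a directory whose IMAGES/ and LABELS2/ subfolders hold matching .tif files; the listing call below is illustrative, not from the original project:

from os import listdir

names = listdir(CONST.PATH + 'IMAGES/')   # hypothetical: every file in the image folder
images, labels = LoadImages(names)
print(images.shape, images.dtype)         # (N, H, W) float32, scaled to [0, 1]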
Code example #2
def CoreSample(model, image_names, save, output_filename, test_flag, minmax_filename):
    if model == []:
        model = LoadPretrainedModel(CONST.PRETRAINED_MODEL, weights=1)
    if test_flag == 0:
        images, labels = LoadImages(image_names)
    else:
        images = LoadImages_Test(image_names, output_filename)  # output_filename: path of the images during testing
    ctr = 0  # index of the first image in the current chunk

    # Process at most CONST.MAXIMAGES_PER_FILE images per output file;
    # saving more than that in a single file runs into memory issues.
    for i in range(int(np.ceil(images.shape[0] / CONST.MAXIMAGES_PER_FILE))):

        last = min(len(images), ctr + CONST.MAXIMAGES_PER_FILE)
        maps = GenerateCore(model, images[ctr:last, :, :])
        maps = maps.reshape(len(maps) * CONST.NUM_PIXELS, CONST.FEATURES)

        if test_flag == 0:
            tic()
            targets = CreateTargets(labels[ctr:last, :, :])
            targets = targets.reshape(len(maps))
            UpdateNorm(find_minmax(maps), minmax_filename)
            toc('Targets Created. MinMax updated.')
        ctr = ctr + CONST.MAXIMAGES_PER_FILE

        # Save features together with targets (training, or testing when labels exist).
        if save == 1:
            SaveData(maps.astype(np.float32), targets.astype(np.float32),
                     output_filename + '_' + str(ctr) + '.h5')
        # Saving test features without labels.
        elif save == 2:
            print('Implementation Removed from this file!')
        else:
            print('Skipped saving.')
            return maps, []  # no saving: return the first chunk's features directly

    return
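
A hedged call sketch: passing model=[] makes the function load the pretrained VGG-16 itself; the file names and flags below are illustrative assumptions, not from the original project:

# Training-time run: extract hypercolumn features and save them with targets
# in chunks named train_features_<n>.h5.
names = listdir(CONST.PATH + 'IMAGES/')
CoreSample([], names, save=1, output_filename='train_features',
           test_flag=0, minmax_filename='minmax.h5')

# Test-time run without saving: returns the first chunk's feature matrix.
maps, _ = CoreSample([], names, save=0, output_filename='/data/test_imgs',
                     test_flag=1, minmax_filename='')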
Code example #3
import h5py

def SaveData(X, Y, output_filename):
    tic()
    # Write features and targets as gzip-compressed datasets under one group;
    # the with-block guarantees the file is closed even on error.
    with h5py.File(output_filename, 'w') as f:
        grp = f.create_group("my_data")
        grp.create_dataset('X', data=X, compression="gzip")
        grp.create_dataset('Y', data=Y, compression="gzip")
    toc('Data File saved to disk.')
    return
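
For symmetry, a short sketch of reading such a file back; the dataset paths match what SaveData writes, while the file name is hypothetical:

with h5py.File('train_features_20.h5', 'r') as f:
    X = f['/my_data/X'][:]
    Y = f['/my_data/Y'][:]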
Code example #4
import pickle
from keras.optimizers import SGD  # Keras 1.x-era API, matching the Theano backend used below

def LoadPretrainedModel(filename, weights):
    tic()
    if weights == 0:  # 0: load a previously pickled model, 1: build VGG-16 and load its weights file
        with open('kerasmodel', 'rb') as f:
            model = pickle.load(f)
    else:
        model = VGG_16(filename)  # VGG_16 is defined elsewhere in the project
        sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
        model.compile(optimizer=sgd, loss='categorical_crossentropy')

    toc("Model Loaded. Compiled.")
    # pickle.dump(model, open('kerasmodel', 'wb'))
    return model
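
A usage sketch, assuming CONST.PRETRAINED_MODEL holds the path to the VGG-16 weights file:

model = LoadPretrainedModel(CONST.PRETRAINED_MODEL, weights=1)
print(len(model.layers))  # sanity check that the network was built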
Code example #5
def LoadImages_Test(X, path):
    # Same loading scheme as LoadImages in example #1, but without labels.
    tic()
    Images = []
    for img_name in X:
        if img_name.endswith(".tif"):
            img = cv2.imread(path + '/' + img_name, -1)  # -1: keep the original bit depth
            Images.append(np.expand_dims(img, axis=0))
        else:
            print('Skipping non-TIFF file:', img_name)

    Images = np.concatenate(Images, axis=0).astype(np.float32)
    toc("Images Loaded.")
    return Images / 255
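
A brief sketch with a hypothetical test directory containing unlabeled .tif images:

test_names = listdir('/data/test_imgs')
test_images = LoadImages_Test(test_names, '/data/test_imgs')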
Code example #6
import numpy
import theano
import theano.tensor as T

def convert_dataset(dataset, c):
    train_set, valid_set = dataset[0], dataset[1]
    assert train_set[0].shape[1] == valid_set[0].shape[1], \
        "Number of features for train, val do not match: {} and {}.".format(
            train_set[0].shape[1], valid_set[0].shape[1])

    def shared_dataset(data_xy, borrow=True, classification=c):
        # Store the data in Theano shared variables so it can live in GPU memory.
        data_x, data_y = data_xy
        shared_x = theano.shared(numpy.asarray(data_x, dtype=theano.config.floatX), borrow=borrow)
        shared_y = theano.shared(numpy.asarray(data_y, dtype=theano.config.floatX), borrow=borrow)
        if classification:
            # Classification targets must be integer labels, not floats.
            shared_y = T.cast(shared_y, 'int32')
        return shared_x, shared_y

    tic()
    train_set_x, train_set_y = shared_dataset(train_set)
    valid_set_x, valid_set_y = shared_dataset(valid_set)
    # test_set_x, test_set_y = shared_dataset(test_set)
    toc('Dataset Converted to shared dataset.')

    num_features = train_set[0].shape[1]
    rval = [(train_set_x, train_set_y), (valid_set_x, valid_set_y)]  # , (test_set_x, test_set_y)]
    return rval, num_features
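
A self-contained sketch with random data; c=True casts the targets to int32 for classification:

X_train = numpy.random.rand(100, 8).astype('float32')
y_train = numpy.random.randint(0, 2, size=100)
X_val = numpy.random.rand(20, 8).astype('float32')
y_val = numpy.random.randint(0, 2, size=20)

(shared_train, shared_valid), num_features = convert_dataset(
    [(X_train, y_train), (X_val, y_val)], c=True)
print(num_features)  # 8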
Code example #7
from os import listdir
import random

def load_saved_data(folder, train, num_imgs):
    # epoch_ctr and file_ctr are module-level counters advanced by the
    # training/testing loops elsewhere in the project.
    if train == 1:
        # Training: cycle through the .h5 files with the epoch counter and
        # sample a random contiguous block of num_imgs images from one file.
        tic()
        data_list = listdir(folder)
        ctr = epoch_ctr % len(data_list)

        file_name = data_list[ctr]
        train_file = h5py.File(folder + file_name, 'r')
        X = train_file['/my_data/X'][:]
        Y = train_file['/my_data/Y'][:]
        start = random.randint(0, int(X.shape[0] / CONST.NUM_PIXELS) - num_imgs)
        X_all = X[start * CONST.NUM_PIXELS:(start + num_imgs) * CONST.NUM_PIXELS, :]
        Y_all = Y[start * CONST.NUM_PIXELS:(start + num_imgs) * CONST.NUM_PIXELS]
        X_all = Normalize(X_all)
    elif train == 0:
        # Validation: load the first file in the folder whole.
        tic()
        data_list = listdir(folder)
        file_name = data_list[0]
        test_file = h5py.File(folder + file_name, 'r')
        X_all = test_file['/my_data/X'][:]
        Y_all = test_file['/my_data/Y'][:]
        X_all = Normalize(X_all)
    elif train == 2:
        # Testing without labels: load the file selected by file_ctr.
        tic()
        data_list = listdir(folder)
        data_list.sort()
        Y_all = []
        file_name = data_list[file_ctr]
        test_file = h5py.File(folder + file_name, 'r')
        X_all = test_file['/my_data/X'][:]
        X_all = Normalize(X_all)

    toc("Data loaded and Normalized from " + folder + file_name)
    return X_all, Y_all
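
A hedged sketch; it assumes the call runs in the same module as load_saved_data so the epoch_ctr global is visible, that the folder path is hypothetical, and that the chosen file holds at least num_imgs images:

epoch_ctr = 0  # advanced by the training loop between epochs
X, Y = load_saved_data('/data/features/', train=1, num_imgs=5)
print(X.shape)  # (5 * CONST.NUM_PIXELS, CONST.FEATURES)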
Code example #8
import scipy as sp
import scipy.misc  # sp.misc.imresize exists in the SciPy versions of this era

def GenerateCore(model, Images):
    tic()
    num_samples = len(Images)

    layers_extract = CONST.LAYER_NUMS
    all_hc = np.zeros((num_samples, CONST.NUM_PIXELS, CONST.FEATURES))

    # Compile a Theano function mapping an input image to the outputs of the
    # selected convolutional layers.
    layers = [model.layers[li].output for li in layers_extract]
    get_feature = theano.function([model.layers[0].input], layers,
                                  allow_input_downcast=False)

    def extract_hypercolumn(instance):
        feature_maps = get_feature(instance)
        hypercolumns = np.zeros((CONST.NUM_PIXELS, CONST.FEATURES))

        # Feature 0 is the original pixel intensity (channel mean added back).
        original = instance[:, 0, :, :] + .407
        hypercolumns[:, 0] = np.reshape(original, (CONST.NUM_PIXELS))

        # Upsample each feature map to full image resolution and stack the
        # maps as per-pixel feature columns.
        ctr = 1
        for convmap in feature_maps:
            for fmap in convmap[0]:
                upscaled = sp.misc.imresize(fmap, size=CONST.IMAGE_DIM,
                                            mode="F", interp='bilinear')
                hypercolumns[:, ctr] = np.reshape(upscaled, (CONST.NUM_PIXELS))
                ctr += 1
        return np.asarray(hypercolumns)

    print("Starting Loop")
    counter = 0
    for i in range(num_samples):
        # Replicate the single grayscale channel into RGB and subtract the
        # per-channel means expected by the pretrained network.
        Y_Channel = Images[i, :, :]
        R = Y_Channel - .407
        G = Y_Channel - .458
        B = Y_Channel - .485
        Y_Image = np.stack((R, G, B), axis=0)
        Y_Image = np.expand_dims(Y_Image, axis=0).astype(np.float32)

        all_hc[counter] = extract_hypercolumn(Y_Image)
        counter += 1
        if not counter % np.ceil(num_samples / 10):
            print(counter)  # progress report roughly every 10%

    toc("Hypercolumns Extracted.")
    return all_hc
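
Tying the pieces together, a hedged end-to-end sketch using the loaders above:

model = LoadPretrainedModel(CONST.PRETRAINED_MODEL, weights=1)
images, _ = LoadImages(listdir(CONST.PATH + 'IMAGES/'))
maps = GenerateCore(model, images[:2])
print(maps.shape)  # (2, CONST.NUM_PIXELS, CONST.FEATURES)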