Example #1
import gzip
import pickle

import numpy as np

# `pickleAllImages` is provided elsewhere in this project.

def images_svm(pickled_file, x=None, all_labels=None, num_labels=15, TRAIN_SPLIT=0.8):

    FILE_SEPARATOR="/"
    if isinstance(pickled_file, str):
        # Load precomputed features; fall back to gzip if the file is compressed.
        try:
            with open(pickled_file, 'rb') as f:
                features = pickle.load(f)
        except pickle.UnpicklingError:
            with gzip.open(pickled_file, 'rb') as f:
                features = pickle.load(f)
    else:
        cnn = pickled_file
        if x is None:
            print("Reading images...")
            all_labels, x = pickleAllImages(num_labels=num_labels, pos=True)
        print("Processing images...")
        input_width, input_height, dropout_percent = 300, 140, 0.2
        x = x.astype(np.float32).reshape((-1, 1, input_width, input_height))
        # Input dropout: zero out a random dropout_percent fraction of the pixels.
        x *= np.random.binomial(1, 1 - dropout_percent, size=x.shape)
        # features = cnn.output_hiddenLayer(x)

        # Run the CNN over the data in four chunks to keep memory usage down.
        quarter_x = x.shape[0] // 4
        print("Starting cnn process...")
        hidden_chunks = []
        for i in range(4):
            start = i * quarter_x
            stop = (i + 1) * quarter_x if i < 3 else x.shape[0]
            out = cnn.output_hiddenLayer(x[start:stop])
            hidden_chunks.append(out.reshape((out.shape[0], -1)))
            print("after quarter %d train output" % (i + 1))

        features = np.concatenate(hidden_chunks, axis=0)
        print('Features size: ', features.shape)
        print('Labels size: ', all_labels.shape)
        # features = pickled_file



    # if num_labels==15:
    #     labels_file = "pickled_images"+FILE_SEPARATOR+"articleCat.pkl.gz"
    # elif num_labels==20:
    #     labels_file = "pickled_images"+FILE_SEPARATOR+"topCat.pkl.gz"
    # elif num_labels==164:
    #     labels_file = "pickled_images"+FILE_SEPARATOR+"all164cat.pkl.gz"
    # elif num_labels==2081:
    #     labels_file = "pickled_images"+FILE_SEPARATOR+"all2081cat.pkl.gz"
    # else:
    #     print("bad labels path!!!!!!!")
    #
    # print("Loading labels")
    # with open(labels_file) as l:
    #     labels = pickle.load(l)[:features.shape[0], :]
    #     l.close()

    return features, all_labels
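
images_svm returns the feature matrix and labels but leaves the classification itself to the caller. A minimal downstream sketch, assuming scikit-learn is available, that label rows are one-hot (so argmax recovers a class index), and a hypothetical feature-file path:

from sklearn.svm import LinearSVC

# Hypothetical path; any feature pickle accepted by images_svm works here.
features, labels = images_svm("pickled_images/features.pkl.gz")

split = int(features.shape[0] * 0.8)       # mirrors TRAIN_SPLIT=0.8
train_y = labels[:split].argmax(axis=1)    # one-hot rows -> class indices
test_y = labels[split:].argmax(axis=1)

clf = LinearSVC()
clf.fit(features[:split], train_y)
print("test accuracy:", clf.score(features[split:], test_y))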
Example #2
import os
import gzip
import random
import cPickle
import numpy

# `pickleAllImages` is provided elsewhere in this project.

def load_data(dataset, toShuffleInput=True, withZeroMeaning=True, labelset=None, start_index=0, end_index=16351, MULTI_POSITIVES=20, dropout_percent=0.1, TRAIN_DATA_PRECENT=0.8, VALIDATION_DATA_PRECENT=0.8):
    ''' Loads the dataset

    :type dataset: string
    :param dataset: the path to the dataset (here MNIST)
    '''

    #############
    # LOAD DATA #
    #############

    # Download the MNIST dataset if it is not present
    data_dir, data_file = os.path.split(dataset)
    if data_dir == "" and not os.path.isfile(dataset):
        # Check if dataset is in the data directory.
        new_path = os.path.join(
            os.path.split(__file__)[0],
            "..",
            "data",
            dataset
        )
        if os.path.isfile(new_path):
            # Fall back to the copy in the data directory.
            dataset = new_path

#     print '... loading data'

#     f1 = gzip.open('ISH-noLearn_0_99_300_140.pkl.gz', 'rb')
#     f2 = gzip.open('ISH-noLearn_100_199_300_140.pkl.gz', 'rb')
#     train_set1, valid_set1, test_set1 = cPickle.load(f1)
#     f1.close()
#     train_set2, valid_set2, test_set2 = cPickle.load(f2)
#     f2.close()
# 
#     train_set = (numpy.concatenate((train_set1[0],train_set2[0]),axis=0), numpy.concatenate((train_set1[1],train_set2[1]),axis=0)) 

    
    
    if labelset is None:
        # Load a pre-split dataset: (train, valid, test) pickled together.
        f = gzip.open(dataset, 'rb')
        train_set, valid_set, test_set = cPickle.load(f)
        f.close()
    else:
#         # Load the dataset for article cat
# #         f = gzip.open(dataset, 'rb')
# #         pData= cPickle.load(f)
# #         f.close()
# 

          
        fileName = "piclked_articleCat_" + str(end_index) + "_" + str(dropout_percent)
        try:
            # Reuse the cached pickle if it already exists.
            f = gzip.open("pickled_images/" + fileName + ".pkl.gz", 'rb')
            pLabel, pData = cPickle.load(f)
            f.close()
        except IOError:
            # Cache miss: rebuild the data and pickle it for next time.
            pLabel, pData = pickleAllImages(num_labels=labelset, TRAIN_SPLIT=TRAIN_DATA_PRECENT, end_index=end_index, MULTI=MULTI_POSITIVES, dropout_percent=dropout_percent)
            f = gzip.open("pickled_images/" + fileName + ".pkl.gz", 'wb')
            cPickle.dump((pLabel, pData), f, protocol=2)
            f.close()
        
        # Split the dataset into train / validation / test parts.
        dataAmount = pData.shape[0]  # end_index - start_index
        train_index = int(numpy.floor(dataAmount * TRAIN_DATA_PRECENT))
        validation_index = int(numpy.floor(dataAmount * VALIDATION_DATA_PRECENT))
        test_index = dataAmount

        train_set_x = pData[:train_index]
        val_set_x = pData[train_index:validation_index]
        test_set_x = pData[validation_index:test_index]
        train_set_y = pLabel[:train_index]
        val_set_y = pLabel[train_index:validation_index]
        test_set_y = pLabel[validation_index:test_index]
        
        train_set = train_set_x, train_set_y
        valid_set = val_set_x, val_set_y
        test_set = test_set_x, test_set_y         
        
    # train_set, valid_set, test_set format: tuple(input, target)
    # input is a numpy.ndarray of 2 dimensions (a matrix) in which each
    # row corresponds to an example. target is a numpy.ndarray of
    # 1 dimension (a vector) with the same length as the number of rows
    # in the input; it gives the target for the example with the same
    # index in the input.
    
    
    
    def zero_meaning(train_set, valid_set, test_set):
        train_x, train_y = train_set
        valid_x, valid_y = valid_set
        test_x, test_y = test_set

        # Normalize using statistics computed over all three splits.
        all_data = numpy.concatenate((train_x, valid_x, test_x), axis=0)
        data_mean = numpy.mean(all_data)
        print "mean:", data_mean
        data_var = numpy.var(all_data, ddof=1)
        print "var:", data_var

        def updateData(data):
            # Operate in place so the caller's arrays are actually modified.
            data -= data_mean
            data /= data_var

        updateData(train_x)
        updateData(valid_x)
        updateData(test_x)
    
    def shared_dataset(data_xy, borrow=True, toShuffleInput=False):
        """ Function that loads the dataset into shared variables

        The reason we store our dataset in shared variables is to allow
        Theano to copy it into the GPU memory (when code is run on GPU).
        Since copying data into the GPU is slow, copying a minibatch everytime
        is needed (the default behaviour if the data is not in a shared
        variable) would lead to a large decrease in performance.
        """
        data_x, data_y = data_xy
        
        if toShuffleInput:
            # Shuffle examples and labels together so pairs stay aligned.
            correspondenceZip = list(zip(data_x, data_y))
            random.shuffle(correspondenceZip)
            data_x = [e[0] for e in correspondenceZip]
            data_y = [e[1] for e in correspondenceZip]
       
             
#         shared_x = theano.shared(numpy.asarray(data_x,
#                                                dtype='float64'), #theano.config.floatX),
#                                  borrow=borrow)
#         shared_y = theano.shared(numpy.asarray(data_y,
#                                                dtype='float64'), #theano.config.floatX),
#                                  borrow=borrow)
#         # When storing data on the GPU it has to be stored as floats
#         # therefore we will store the labels as ``floatX`` as well
#         # (``shared_y`` does exactly that). But during our computations
#         # we need them as ints (we use labels as index, and if they are
#         # floats it doesn't make sense) therefore instead of returning
#         # ``shared_y`` we will have to cast it to int. This little hack
#         # lets ous get around this issue
#         return shared_x, shared_y #T.cast(shared_y, 'int32')
        return numpy.array(data_x), numpy.array(data_y)


    if withZeroMeaning:
        zero_meaning(train_set, valid_set, test_set)

    test_set_x, test_set_y = shared_dataset(test_set)
    valid_set_x, valid_set_y = shared_dataset(valid_set)
    train_set_x, train_set_y = shared_dataset(train_set, toShuffleInput=True)

    rval = [(train_set_x, train_set_y), (valid_set_x, valid_set_y),
            (test_set_x, test_set_y)]
    return rval
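
A minimal usage sketch for the labelset-is-None path, written in Python 2 to match the cPickle / print-statement style above, and assuming the standard gzip-pickled MNIST triple from the Theano tutorials:

datasets = load_data('mnist.pkl.gz', withZeroMeaning=False)
(train_x, train_y), (valid_x, valid_y), (test_x, test_y) = datasets
print "train:", train_x.shape, "valid:", valid_x.shape, "test:", test_x.shape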