Code Example #1
def get_data_from_benchmark(mode, one_hot=True):
    """Get data from Bengio's ICML 2007 paper "An Empirical Evaluation of Deep Architectures
	   on Problems with Many Factors of Variation"
	"""

    if mode == "MNIST":
        (X_train, y_train), (X_test, y_test) = get_MNIST(mode="original",
                                                         digit_per_class=None,
                                                         one_hot=False)
        X_train = np.reshape(X_train, (-1, 784))
        X_test = np.reshape(X_test, (-1, 784))
        num_classes = 10
    else:
        if mode == "MNIST-BG-IMG-ROT":
            filename = "datasets/MNIST-BG-IMG-ROT/MNIST-BG-IMG-ROT_{0}.amat"
            num_classes = 10
        elif mode == "RECT":
            filename = "datasets/rectangles/rectangles_{0}.amat"
            num_classes = 2
        elif mode == "CONVEX":
            filename = "datasets/convex/convex_{0}.amat"
            num_classes = 2
        data_train = np.genfromtxt(filename.format("train"))
        data_test = np.genfromtxt(filename.format("test"))
        X_train = data_train[:, :-1]
        y_train = data_train[:, -1].astype(int)
        X_test = data_test[:, :-1]
        y_test = data_test[:, -1].astype(int)
    if one_hot:
        y_train = dense_to_one_hot(y_train, num_classes=num_classes)
        y_test = dense_to_one_hot(y_test, num_classes=num_classes)
    else:
        y_train = np.expand_dims(y_train, 1)
        y_test = np.expand_dims(y_test, 1)
    return (X_train, y_train), (X_test, y_test)
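
A minimal usage sketch, assuming the .amat files referenced above exist on disk and that numpy (np), get_MNIST, and dense_to_one_hot are available in the same module:

# Hypothetical call; requires datasets/rectangles/rectangles_train.amat and _test.amat locally.
(X_train, y_train), (X_test, y_test) = get_data_from_benchmark("RECT", one_hot=True)
print(X_train.shape, y_train.shape)  # feature matrix and one-hot labels (2 classes)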
Code Example #2
def get_xor_binomial(
    input_size=6,
    positive_proba_train=0.5,
    positive_proba_test=0.5,
    num_train=100,
    num_test=100,
    one_hot=True,
    plus_minus_format=False,
):
    """
	Generate the data whose label is an XOR function of the binary input
	
	Parameters
	----------
	input_size : int
	  Size of the binary input.
	  
	positive_proba_train : float
	  Probability of each element of training example being 1.
	  
	positive_proba_test : float
	  Probability of each element of testing example being 1.
	  
	num_train : int
	  Number of training data.
	  
	num_test : int
	  Number of testing data.

	one_hot: bool
	  Whether the labels y_train and y_test are in one_hot format. Default True.

	plus_minus_format : bool
      Use 1 and -1 instead of 1 and 0 as binary encoding. Default False.
	"""
    assert 0 <= positive_proba_train <= 1, "positive_proba_train must be inside [0,1]!"
    assert 0 <= positive_proba_test <= 1, "positive_proba_test must be inside [0,1]!"
    X_train = choice(2,
                     size=[num_train, input_size],
                     p=[1 - positive_proba_train, positive_proba_train])
    X_test = choice(2,
                    size=[num_test, input_size],
                    p=[1 - positive_proba_test, positive_proba_test])
    y_train = np.sum(X_train, 1) % 2
    y_test = np.sum(X_test, 1) % 2
    if plus_minus_format:
        X_train = 2 * X_train - 1
        X_test = 2 * X_test - 1
    if one_hot:
        y_train = dense_to_one_hot(y_train, 2)
        y_test = dense_to_one_hot(y_test, 2)
    else:
        y_train = np.expand_dims(y_train, 1)
        y_test = np.expand_dims(y_test, 1)
    return (X_train, y_train), (X_test, y_test)
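
A minimal usage sketch; it assumes choice is numpy.random.choice and dense_to_one_hot comes from this module:

# Labels are the parity (XOR) of each binary row.
(X_train, y_train), (X_test, y_test) = get_xor_binomial(input_size=6, num_train=4, one_hot=False)
print(X_train.shape, y_train.shape)  # (4, 6) and (4, 1)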
Code Example #3
def get_XOR(
    input_size=6,
    one_hot=True,
    plus_minus_format=False,
):
    """
	Generate the data whose label is an XOR (parity) function of all possible combinations of input bits.
	
	Parameters
	----------
	input_size : int
	  Number of input bits.
	  
	one_hot: bool
	  Whether the labels y_train and y_test are in one_hot format. Default True.
	
	plus_minus_format : bool
      Use 1 and -1 instead of 1 and 0 as binary encoding. Default False.
	"""

    n = 2**input_size

    def bits(i):
        # np.unpackbits operates on uint8, so this supports input_size up to 8 bits.
        return np.unpackbits(np.uint8(i))[-input_size:]

    # Wrap map() in list() so np.array builds a proper 2D array under Python 3.
    X = np.array(list(map(bits, range(n)))).astype('int')
    y = np.array(list(map(XOR, X))).astype('int')
    if plus_minus_format:
        X = 2 * X - 1
    if one_hot:
        y = dense_to_one_hot(y, 2)
    else:
        y = np.expand_dims(y, 1)
    return (X, y)
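
A minimal usage sketch; it assumes XOR(x) is a module helper returning the parity of a bit vector:

# Enumerates all 2**input_size bit patterns together with their parity.
X, y = get_XOR(input_size=3, one_hot=False)
print(X.shape, y.shape)  # (8, 3) and (8, 1)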
Code Example #4
def get_rolling_sequence(
    code="101011",
    input_size=10,
    num_train=100,
    num_test=100,
    one_hot=True,
):
    """
	Generates 1D rolling binary sequences with label y indicating how much the original sequence has been rolled.
	The sequence has periodic boundary conditions.

	Parameters
	----------
	code : str
		The base binary pattern. Each example is this pattern (zero-padded to input_size)
		cyclically rolled by a random amount, and the label y is that roll amount.

	input_size : int
		size of the image that the code can be translated on.

	num_train : int
		Number of training data.

	num_test : int
		Number of testing data.

	one_hot: bool
		Whether the labels y_train and y_test are in one_hot format. Default True.
	"""
    assert len(
        code) <= input_size, "The length of code should not exceed the input"
    from sklearn.model_selection import train_test_split
    num_examples = num_train + num_test
    sequence = np.pad(np.array([int(element) for element in list(code)]),
                      [0, input_size - len(code)], "constant")
    full_data = np.repeat(np.expand_dims(sequence, 0), num_examples, axis=0)
    data_labels = choice(
        range(input_size),
        size=num_examples)  # Each code is randomly translated on the image.
    data_examples = np.array([
        np.roll(full_data[i], data_labels[i], axis=0)
        for i in range(num_examples)
    ])

    if one_hot:
        data_labels = dense_to_one_hot(data_labels, num_classes=input_size)
    else:
        data_labels = np.expand_dims(data_labels, 1)

    X_train, X_test, y_train, y_test = train_test_split(
        data_examples,
        data_labels,
        test_size=num_test / float(num_train + num_test))
    return (X_train, y_train), (X_test, y_test)
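
A minimal usage sketch; choice and dense_to_one_hot are assumed from this module:

# Each example is "101011" zero-padded to length 10 and cyclically rolled;
# the label is the roll amount, one-hot encoded over input_size positions by default.
(X_train, y_train), (X_test, y_test) = get_rolling_sequence(code="101011", input_size=10,
                                                            num_train=80, num_test=20)
print(X_train.shape, y_train.shape)  # (80, 10) and (80, 10)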
Code Example #5
def rolloutActions():
    # Roll out T actions from the (global) controller network: starting from a
    # single-node graph, repeatedly query the controller and grow the architecture,
    # then build the resulting child model.
    global controller

    actions = []
    probs = []
    adj = sp.coo_matrix((np.array([0]), (np.array([0]), np.array([0]))),
                        shape=(1, 1),
                        dtype=np.float32)
    features = sp.csr_matrix(dense_to_one_hot(np.array([0]), K),
                             dtype=np.float32)
    for ite in range(T):
        adj_torch = sparse_mx_to_torch_sparse_tensor(
            normalize(adj + adj.T.multiply(adj.T > adj) -
                      adj.multiply(adj.T > adj) + sp.eye(adj.shape[0])))
        features_torch = torch.FloatTensor(
            np.array(normalize(features).todense()))
        prob, action = controller(Variable(features_torch), adj_torch)
        probs.append(prob)
        actions.append(action)
        adj, features = update_architecture(
            adj, features, dense_to_one_hot(action.data.numpy(), K + 1))
    return actions, probs, build_child_model(adj, features)
Code Example #6
def get_and_or_binomial(
        input_size=10,
        num_train=1000,
        num_test=1000,
        fraction_single_example=0.5,  # fraction of all-ones examples (label 1) for "and", or all-zeros examples (label 0) for "or"
        one_hot=True,
        mode="and",  # Choose from "and" and "or"
):
    """Generate binary examples whose label is the AND (all bits 1) or OR (any bit 1)
    of the input bits; fraction_single_example controls the fraction of all-ones
    ("and") or all-zeros ("or") examples."""
    from sklearn.model_selection import train_test_split
    # Get examples:
    if fraction_single_example is None:
        data_examples = np.random.randint(2,
                                          size=(num_train + num_test,
                                                input_size))
    else:
        data_examples1 = np.random.randint(
            2,
            size=(int(
                (num_train + num_test) *
                (1 - fraction_single_example + 2**(-input_size))), input_size))
        if mode == "and":
            data_examples2 = np.ones((int(
                (num_train + num_test) *
                (fraction_single_example + 2**(-input_size))), input_size))
        elif mode == "or":
            data_examples2 = np.zeros((int(
                (num_train + num_test) *
                (fraction_single_example + 2**(-input_size))), input_size))
        else:
            raise Exception("mode {0} not recognized!".format(mode))
        data_examples = np.concatenate((data_examples1, data_examples2))

    # Get labels:
    if mode == "and":
        data_labels = (np.sum(data_examples, 1) == input_size).astype(int)
    elif mode == "or":
        data_labels = (np.sum(data_examples, 1) >= 1).astype(int)

    # Process one_hot:
    if one_hot:
        data_labels = dense_to_one_hot(data_labels, 2)
    else:
        data_labels = np.expand_dims(data_labels, 1)

    X_train, X_test, y_train, y_test = train_test_split(
        data_examples,
        data_labels,
        test_size=num_test / float(num_train + num_test))
    return (X_train, y_train), (X_test, y_test)
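
A minimal usage sketch; dense_to_one_hot and scikit-learn are assumed available:

# Roughly half of the examples are the all-ones vector, the only positive case for "and".
(X_train, y_train), (X_test, y_test) = get_and_or_binomial(input_size=10, mode="and",
                                                           fraction_single_example=0.5,
                                                           one_hot=False)
print(y_train.mean())  # close to 0.5 thanks to the oversampled all-ones examples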
Code Example #7
def get_logical(
    input_size=10,
    mode="and",  # Choose from "and", "or" or "xor"
    num_train=1000,
    num_test=1000,
    one_hot=False,
):
    """Generate binary examples whose label is the AND, OR, or XOR (parity) of all
    input bits; the number of 1s in each example is drawn uniformly from 0 to input_size."""
    from sklearn.model_selection import train_test_split
    num_examples = num_train + num_test

    data_examples = []
    for i in range(num_examples):
        num_ones = choice(input_size + 1)
        ones_pos = choice(input_size, size=num_ones, replace=False)

        data_example = np.zeros(input_size)
        data_example[ones_pos] = 1
        data_examples.append(data_example.tolist())
    data_examples = np.array(data_examples)

    if mode == "and":
        data_labels = (np.sum(data_examples, 1) == input_size).astype(int)
    elif mode == "or":
        data_labels = (np.sum(data_examples, 1) >= 1).astype(int)
    elif mode == "xor":
        data_labels = (np.sum(data_examples, 1) % 2).astype(int)

    # Process one_hot:
    if one_hot:
        data_labels = dense_to_one_hot(data_labels, 2)
    else:
        data_labels = np.expand_dims(data_labels, 1)

    X_train, X_test, y_train, y_test = train_test_split(
        data_examples,
        data_labels,
        test_size=num_test / float(num_train + num_test))
    return (X_train, y_train), (X_test, y_test)
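
A minimal usage sketch; choice is assumed to be numpy.random.choice:

# With mode="xor" the label is the parity of each example.
(X_train, y_train), (X_test, y_test) = get_logical(input_size=10, mode="xor")
print(X_train.shape, y_train.shape)  # (1000, 10) and (1000, 1)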
Code Example #8
def get_MNIST(
        mode="random translation",
        core_size=(22, 22),
        target_image_size=(28, 28),
        digit_per_class=None,
        one_hot=False,
):
    """This function fetches the original MNIST dataset.
	Both the X_train and X_test have the shape of (-1, 28, 28).

	Parameters
	----------
	mode : str
	  The method to present the training and test image. Choose from the following modes:
	  "original" : return the training and testing images intact.
	  "random translation": return the training and testing images with random translation, 
	  		within the boundary of the image
	  "random translation torus": return the training and testing images with random translation, 
	  		with periodic boundary condition.

	core_size : (int, int)
	  (image_height, image_width). The height and width of the core image. It can be any size,
	  larger or smaller than the cropped image. Default (22, 22), which keeps the image size
	  obtained after cropping the margins of the MNIST image. Only effective when mode != "original".

	target_image_size : (int, int)
	  (target_image_height, target_image_width). The target image height and width. If the 
	  core_size is smaller than the target_image_size, this function will pad the image to 
	  the right or bottom. Only effective when mode != "original".

	digit_per_class : int
	  Number of digits per class in the training set. If the number of digits is less than or
	  equal to the number of available digits in that class, it samples without replacement;
	  otherwise it samples with replacement. Default None to use all the given data.

	one_hot : bool
	  whether to return the y_train and y_test as one_hot vector. Default False.
	"""
    np.random.seed(42)  # Seed the random number generator
    # fetch_mldata was removed from scikit-learn; fetch_openml serves the same data.
    from sklearn.datasets import fetch_openml
    dataset = fetch_openml('mnist_784', version=1, as_frame=False)
    X_flattened = dataset.data
    y = dataset.target.astype(int)
    # Reshaping the examples into the shape of (-1, 28, 28)
    X = np.array([np.reshape(f, (-1, 28)) for f in X_flattened])
    # Always split the MNIST training set and testing set in the traditional way:
    X_train, X_test = X[:60000], X[60000:]
    y_train, y_test = y[:60000], y[60000:]
    # sample training data with each class having digit_per_class of digits
    X_train, y_train = sample_digits(X_train,
                                     y_train,
                                     digit_per_class=digit_per_class)
    print(
        "MNIST dataset fetched. Length: training set: {0} images, testing set {1} images"
        .format(len(X_train), len(X_test)))
    if mode != "original":
        # Crop and shrink the images:
        X_train = crop_shrink_pad_image(X_train,
                                        core_size=core_size,
                                        target_image_size=target_image_size)
        X_test = crop_shrink_pad_image(X_test,
                                       core_size=core_size,
                                       target_image_size=target_image_size)
        # randomly translate the image within the boundary:
        if mode == "random translation":
            X_train = random_translate_image(
                X_train,
                row_limit=(0, target_image_size[0] - core_size[0]),
                column_limit=(0, target_image_size[1] - core_size[1]))
            X_test = random_translate_image(
                X_test,
                row_limit=(0, target_image_size[0] - core_size[0]),
                column_limit=(0, target_image_size[1] - core_size[1]))
        # randomly translate the image with periodic boundary condition:
        elif mode == "random translation torus":
            X_train = random_translate_image(
                X_train,
                row_limit=(0, target_image_size[0]),
                column_limit=(0, target_image_size[1]))
            X_test = random_translate_image(
                X_test,
                row_limit=(0, target_image_size[0]),
                column_limit=(0, target_image_size[1]))
        else:
            raise Exception(
                "Mode not recognized! Please choose from the available modes!")
    if one_hot:
        from util import dense_to_one_hot
        y_train = dense_to_one_hot(y_train, num_classes=10)
        y_test = dense_to_one_hot(y_test, num_classes=10)
    X_train = X_train / float(255)
    X_test = X_test / float(255)
    return (X_train, y_train), (X_test, y_test)
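
A minimal usage sketch; sample_digits, crop_shrink_pad_image, and random_translate_image are assumed to be helpers from this module:

# Original (untranslated) images, 100 digits per class in the training set.
(X_train, y_train), (X_test, y_test) = get_MNIST(mode="original", digit_per_class=100)
print(X_train.shape, X_test.shape)  # (1000, 28, 28) and (10000, 28, 28), scaled to [0, 1]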
Code Example #9
def get_1D_double_code(
    code_size=5,
    num_labels=4,
    code_margin=2,
    image_size=12,
    num_train=100,
    num_test=100,
    one_hot=False,
):
    """
	Generates 1D double binary codes with translational symmetry. For example, a double code
	may look like 010011000111010000, where there are two single codes "10011" and "11101",
	separated by a margin of 3. Each single binary code has "1" at the left-most and right-most
	positions, so that we can identify the start and end of the code when it is translated.

	
	Parameters
	----------
	code_size : int
	  The size of each code. For example, 11001 has a code_size of 5. Since all codes have "1" at the left-most
	and right-most positions, the effective length of the code is code_size - 2. num_labels cannot exceed
	2 ** (code_size - 2).
	  
	num_labels : int
	  The number of different codes. It cannot exceed 2 ** (code_size - 2).

	code_margin: int
	  The margin between the two single-codes in the double code.
	  
	image_size : int
	  size of the image that the code can be translated on.
	
	num_train : int
	  Number of training data.
	  
	num_test : int
	  Number of testing data.

	one_hot: bool
	  Whether the labels y_train and y_test are in one_hot format
	"""
    assert code_size > 2, "code size should be larger than 2!"
    assert image_size >= 2 * code_size + code_margin, "image_size should be equal to or larger than (2 * code_size + code_margin)!"
    assert num_labels <= 2**(
        code_size - 2), "number_labels should not exceed 2 ^ (code_size - 2)"
    from sklearn.model_selection import train_test_split
    chosen_codes = sorted(
        choice(2**(code_size - 2), size=num_labels, replace=False))
    chosen_codes_binary = []
    white_space = image_size - (2 * code_size + code_margin)
    # Generate num_labels different binary codes with size of code_size:
    for i in range(num_labels):
        label = i
        binary = np.binary_repr(chosen_codes[i], width=code_size - 2)
        binary = (np.array([1] + [int(letter)
                                  for letter in binary] + [1])).astype(int)
        chosen_codes_binary.append((binary, label))
        print(binary, "label: {0}".format(label))

    # Construct the training and testing sets.
    data_examples = []
    data_labels = []
    for i in range(num_train + num_test):
        idx1, idx2 = choice(num_labels, size=2)
        code1, label1 = chosen_codes_binary[idx1]
        code2, label2 = chosen_codes_binary[idx2]
        label = (label1, label2
                 )  # Since it is double code, it has double labels.
        code = np.concatenate((code1, np.zeros(code_margin), code2,
                               np.zeros(white_space))).astype(int)
        shift_amount = choice(
            white_space + 1)  # Each code is randomly translated on the image.
        code = np.roll(code, shift_amount)
        data_examples.append(code)
        data_labels.append(label)
    if one_hot:
        data_labels = dense_to_one_hot(data_labels, num_labels)
    else:
        data_labels = np.expand_dims(data_labels, 1)
    X_train, X_test, y_train, y_test = train_test_split(
        data_examples,
        data_labels,
        test_size=num_test / float(num_train + num_test))
    return (X_train, y_train), (X_test, y_test)
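
A minimal usage sketch; note that each label is a (label1, label2) pair, so one_hot=False (the default) is the natural setting here:

# image_size must be at least 2 * code_size + code_margin (here 12 >= 2 * 5 + 2).
(X_train, y_train), (X_test, y_test) = get_1D_double_code(code_size=5, num_labels=4,
                                                          code_margin=2, image_size=12)
print(np.array(X_train).shape)  # (100, 12) binary sequences, each holding two codes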
Code Example #10
def get_1D_code(
    code_size=5,
    num_labels=4,
    image_size=10,
    num_train=100,
    num_test=100,
    one_hot=True,
):
    """
	Generates 1D binary codes with translational symmetry. Each code has "1" at the left-most
	and right-most positions, so that we can identify the start and end of the code when it is translated.
	For example, 11001 is a valid code; 10100 is not.
	
	Parameters
	----------
	code_size : int
	  The size of each code. For example, 11001 has a code_size of 5. Since all codes have "1" at the left-most
	and right-most positions, the effective length of the code is code_size - 2. num_labels cannot exceed
	2 ** (code_size - 2).
	  
	num_labels : int
	  The number of different codes. It cannot exceed 2 ** (code_size - 2).
	  
	image_size : int
	  size of the image that the code can be translated on.
	
	num_train : int
	  Number of training data.
	  
	num_test : int
	  Number of testing data.

	one_hot: bool
	  Whether the labels y_train and y_test are in one_hot format. Default True.
	"""
    assert code_size > 2, "code size should be larger than 2!"
    assert image_size >= code_size, "image_size should be equal to or larger than code_size!"
    assert num_labels <= 2**(
        code_size - 2), "number_labels should not exceed 2 ^ (code_size - 2)"
    from sklearn.model_selection import train_test_split
    chosen_codes = sorted(
        choice(2**(code_size - 2), size=num_labels, replace=False))
    chosen_codes_binary = []
    white_space = image_size - code_size
    # Generate num_labels different binary codes with size of code_size:
    for i in range(num_labels):
        label = i
        binary = np.binary_repr(chosen_codes[i], width=code_size - 2)
        binary = np.concatenate(
            (np.array([1] + [int(letter) for letter in binary] + [1]),
             np.zeros(white_space))).astype(int)
        chosen_codes_binary.append((binary, label))
        print(binary[:code_size], "label: {0}".format(label))

    # Construct the training and testing sets.
    data_examples = []
    data_labels = []
    for i in range(num_train + num_test):
        idx = choice(num_labels)
        example, label = chosen_codes_binary[idx]
        shift_amount = choice(
            white_space + 1)  # Each code is randomly translated on the image.
        example = np.roll(example, shift_amount)
        data_examples.append(example)
        data_labels.append(label)
    data_examples = np.array(data_examples)
    data_labels = np.array(data_labels)
    if one_hot:
        data_labels = dense_to_one_hot(data_labels, num_labels)
    else:
        data_labels = np.expand_dims(data_labels, 1)

    X_train, X_test, y_train, y_test = train_test_split(
        data_examples,
        data_labels,
        test_size=num_test / float(num_train + num_test))
    return (X_train, y_train), (X_test, y_test)
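
A minimal usage sketch; choice and dense_to_one_hot are assumed from this module:

# Four distinct length-5 codes, each randomly shifted inside a length-10 sequence;
# the label identifies which code is present (one-hot over num_labels by default).
(X_train, y_train), (X_test, y_test) = get_1D_code(code_size=5, num_labels=4, image_size=10)
print(X_train.shape, y_train.shape)  # (100, 10) and (100, 4)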
Code Example #11
def get_binary_sequence(
    code="10101",
    input_size=10,
    num_train=100,
    num_test=100,
    one_hot=True,
    plus_minus_format=True,
):
    """
    Generates 1D binary sequences with label y = 1 if and only if the sequence contains the given code. The sequence has
    periodic boundary conditions.

    Parameters
    ----------
    code : str
      The code given. The label y = 1 if and only if the binary sequence contains the code given.

    input_size : int
      size of the image that the code can be translated on.

    num_train : int
      Number of training data.

    num_test : int
      Number of testing data.

    one_hot: bool
      Whether the labels y_train and y_test are in one_hot format. Default True.

    plus_minus_format : bool
      Use 1 and -1 instead of 1 and 0 as binary encoding. Default True.
    """
    assert len(
        code
    ) <= input_size, "The length of hidden code should not exceed the input"
    from sklearn.model_selection import train_test_split
    data_examples = []
    data_labels = []
    for i in range(num_train + num_test):
        integer = np.random.randint(2**input_size)
        binary = np.binary_repr(integer, width=input_size)
        label = 0
        # Check every cyclic shift of the sequence for the code (periodic boundary).
        for shift in range(input_size):
            binary_shift = binary[shift:] + binary[:shift]
            if code in binary_shift:
                label = 1
                break
        if plus_minus_format:
            binary = (np.array([2 * int(letter) - 1
                                for letter in binary])).astype(int)
        else:
            binary = (np.array([int(letter) for letter in binary])).astype(int)
        data_examples.append(binary)
        data_labels.append(label)
    data_examples = np.array(data_examples)
    data_labels = np.array(data_labels)
    if one_hot:
        data_labels = dense_to_one_hot(data_labels, 2)
    else:
        data_labels = np.expand_dims(data_labels, 1)
    X_train, X_test, y_train, y_test = train_test_split(
        data_examples,
        data_labels,
        test_size=num_test / float(num_train + num_test))
    return (X_train, y_train), (X_test, y_test)
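
A minimal usage sketch; dense_to_one_hot is assumed from this module:

# y = 1 exactly when some cyclic shift of the example contains "10101" as a substring.
(X_train, y_train), (X_test, y_test) = get_binary_sequence(code="10101", input_size=10,
                                                           one_hot=False,
                                                           plus_minus_format=False)
print(X_train.shape, y_train.shape)  # (100, 10) and (100, 1)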