def __init__(self,
             X,
             types_softmax,
             probs_softmax,
             magnitudes_softmax,
             argmax=False):
    # Ekin Dogus says he sampled from the softmaxes and did not use argmax.
    # We might still want to use argmax=True for the final predictions, to
    # ensure the best solutions are chosen and to make them deterministic.
    transformations = get_transformations(X)
    if argmax:
        # Take the highest-probability bin from each softmax.
        self.type = types_softmax.argmax()
        t = transformations[self.type]
        self.prob = probs_softmax.argmax() / (OP_PROBS - 1)
        m = magnitudes_softmax.argmax() / (OP_MAGNITUDES - 1)
        # Rescale m from [0, 1] into the transformation's [min, max] range.
        self.magnitude = m * (t[2] - t[1]) + t[1]
    else:
        # Sample each component from its softmax distribution.
        self.type = np.random.choice(OP_TYPES, p=types_softmax)
        t = transformations[self.type]
        self.prob = np.random.choice(np.linspace(0, 1, OP_PROBS),
                                     p=probs_softmax)
        self.magnitude = np.random.choice(
            np.linspace(t[1], t[2], OP_MAGNITUDES),
            p=magnitudes_softmax)
    self.transformation = t[0]
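
As a quick illustration of the discretization above, here is a minimal, self-contained sketch. The dummy softmax vectors and the 0-30 transformation range are assumptions for illustration, not values from the original code:

import numpy as np

OP_PROBS, OP_MAGNITUDES = 11, 10  # same bin counts as the experiment parameters below

# Dummy controller outputs; any valid probability vectors work here.
probs_softmax = np.random.dirichlet(np.ones(OP_PROBS))
magnitudes_softmax = np.random.dirichlet(np.ones(OP_MAGNITUDES))

prob = probs_softmax.argmax() / (OP_PROBS - 1)         # one of 0.0, 0.1, ..., 1.0
m = magnitudes_softmax.argmax() / (OP_MAGNITUDES - 1)  # normalized to [0, 1]
t_min, t_max = 0.0, 30.0                               # assumed transformation range
magnitude = m * (t_max - t_min) + t_min                # rescaled into that range
print(prob, magnitude)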
Example 2
    def __init__(self,
                 test=False,
                 batch_size=200,
                 one_hot=True,
                 shuffle=True,
                 sampling=1.0,
                 policy=None):
        (x_train, y_train), (x_test, y_test) = load_data()
        if test:
            self._data = x_test
            self._label = y_test
        else:
            self._data = x_train
            self._label = y_train
        if one_hot:
            # Convert integer labels to one-hot vectors via advanced indexing.
            one_hot_labels = np.zeros((len(self._label), 10))
            one_hot_labels[np.arange(len(self._label)),
                           self._label.flatten()] = 1
            self._label = one_hot_labels
        self._batch_size = batch_size

        # shuffle before reducing the dataset
        self._num_samples = len(self._data)
        self._num_left = self._num_samples
        self.shuffle = shuffle
        if shuffle:
            self.shuffle_data()

        # reduce dataset
        assert sampling > 0, "Sampling must be greater than 0"
        assert sampling <= 1, "Sampling must be at most 1"

        self._data = self._data[:int(len(self._data) * sampling)]
        self._label = self._label[:int(len(self._label) * sampling)]

        self._num_samples = len(self._data)
        self._num_left = self._num_samples
        self._batch_pointer = 0

        if policy is None:
            # raise Exception("Policy not given. Need to provide policy")
            print("No policy given. Using dataset without data augmentation.")
        else:
            self._policy = policy

            # Note that for the pairing sample, img2 is chosen randomly from the entire dataset.
            self._transformations = get_transformations(x_train)
            self._apply_policy()
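
A hypothetical usage sketch: the snippet above only shows __init__, so the class name Dataset below is an assumption, not part of the original code:

# Assumed class name; only its __init__ is shown above.
train_set = Dataset(test=False,
                    batch_size=128,
                    one_hot=True,
                    shuffle=True,
                    sampling=0.1,  # keep 10% of the training data
                    policy=None)   # no augmentation policy applied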
Example 3
import numpy as np
from keras import datasets, utils  # imports assumed; the excerpt starts mid-function


def get_dataset(dataset, reduced):
    if dataset == 'cifar10':
        (Xtr, ytr), (Xts, yts) = datasets.cifar10.load_data()
    elif dataset == 'cifar100':
        (Xtr, ytr), (Xts, yts) = datasets.cifar100.load_data()
    else:
        raise Exception('Unknown dataset %s' % dataset)
    if reduced:
        # Sample 4,000 training examples without replacement
        # (the reduced-dataset setting used for policy search).
        ix = np.random.choice(len(Xtr), 4000, False)
        Xtr = Xtr[ix]
        ytr = ytr[ix]
    ytr = utils.to_categorical(ytr)
    yts = utils.to_categorical(yts)
    return (Xtr, ytr), (Xts, yts)


(Xtr, ytr), (Xts, yts) = get_dataset('cifar10', False)
transformations = get_transformations(Xtr)

# Experiment parameters

LSTM_UNITS = 100      # units in the controller LSTM

SUBPOLICIES = 5       # sub-policies per policy
SUBPOLICY_OPS = 2     # operations per sub-policy

OP_TYPES = 16         # number of available transformations
OP_PROBS = 11         # probability bins: 0.0, 0.1, ..., 1.0
OP_MAGNITUDES = 10    # magnitude bins, rescaled per transformation

CHILD_BATCH_SIZE = 128
CHILD_BATCHES = len(Xtr) // CHILD_BATCH_SIZE
CHILD_EPOCHS = 10
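
For reference, CIFAR-10's full training set has 50,000 images, so the batch count above works out as follows (the reduced 4,000-example case is shown for comparison):

# Batches per epoch at CHILD_BATCH_SIZE = 128:
assert 50000 // CHILD_BATCH_SIZE == 390  # full CIFAR-10 training set
assert 4000 // CHILD_BATCH_SIZE == 31    # reduced 4,000-example set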