Code Example #1
import gzip
import pickle

import utils  # project-local helper module providing vstack()


def read_data():
    """Read the states and actions recorded by drive_manually.py."""
    print("Reading data")
    with gzip.open('./data_from_expert/data_02.pkl.gzip', 'rb') as f:
        data = pickle.load(f)
    X = utils.vstack(data["state"])
    y = utils.vstack(data["action"])
    return X, y
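
# Hypothetical follow-up, not part of the original script: split the expert
# data into training and validation sets before fitting an imitation model.
# The 90/10 ratio and the shuffling are illustrative choices.
import numpy as np

X, y = read_data()
idx = np.random.permutation(len(X))  # shuffle so the split is not time-ordered
n_train = int(0.9 * len(X))
X_train, y_train = X[idx[:n_train]], y[idx[:n_train]]
X_valid, y_valid = X[idx[n_train:]], y[idx[n_train:]]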
Code Example #2
import utils  # project-local helpers: read_all_gzip() and vstack()


def read_data(data_path, use_last=False):
    """Read recorded states and actions from every gzip file under data_path."""
    # TODO: Fix the file thing
    print("Reading data...")
    all_states, _, _, all_actions, _ = utils.read_all_gzip(data_path)

    if use_last:
        # Keep only the most recent recording session.
        all_states = all_states[-1:]
        all_actions = all_actions[-1:]

    X = utils.vstack(all_states)
    y = utils.vstack(all_actions)

    return X, y
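
# utils.read_all_gzip is project-local and not shown. A minimal sketch of what
# such a reader could look like, assuming each *.pkl.gzip file in data_path
# holds a dict like the one loaded in Example #1. The five returned lists
# mirror the five-way unpacking above; the middle fields are placeholders.
import glob
import gzip
import os
import pickle

def read_all_gzip(data_path):
    states, next_states, rewards, actions, terminals = [], [], [], [], []
    for name in sorted(glob.glob(os.path.join(data_path, '*.pkl.gzip'))):
        with gzip.open(name, 'rb') as f:
            data = pickle.load(f)
        states.append(data.get("state"))
        next_states.append(data.get("next_state"))
        rewards.append(data.get("reward"))
        actions.append(data.get("action"))
        terminals.append(data.get("terminal"))
    return states, next_states, rewards, actions, terminals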
Code Example #3
import numpy as np
import torch

# Evaluation pass. The snippet began mid-loop, so the imports, accumulator
# set-up, and loop header here are reconstructed from context; `model`,
# `criterion`, and `data_loader` are defined elsewhere.
model.eval()
total_loss = 0.0
batch_real_data = []
batch_predicted = []

with torch.no_grad():  # no gradients are needed during evaluation
    for data, target in data_loader:
        output = model(data)

        loss = criterion(output, target)

        batch_real_data.append(target.numpy())
        batch_predicted.append(output.numpy())

        # Weight the batch loss by batch size so the average below is per-sample.
        batch_size = data.shape[0]
        total_loss += loss.item() * batch_size

n_samples = len(data_loader.sampler)
loss = total_loss / n_samples
print('Total loss: {} Mean loss: {}'.format(total_loss, loss))

real_data = np.vstack(batch_real_data)
predicted = np.vstack(batch_predicted)
print(predicted)  # debug output

correct = 0  # number of samples whose predicted class matches the label
for i in range(len(real_data)):
    # Assumes each row of real_data holds a single class index.
    if real_data[i] == np.argmax(predicted[i]):
        correct += 1

print(correct / len(real_data) * 100)  # accuracy in percent
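
# A vectorized equivalent of the accuracy loop above, under the same
# assumption that each row of real_data holds a single class index:
accuracy = (real_data.ravel() == predicted.argmax(axis=1)).mean() * 100
print(accuracy)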
#real_data = dataset.min_max_scaler.inverse_transform(real_data)
#predicted = dataset.min_max_scaler.inverse_transform(predicted)

# Undo what was presumably a log transform applied to the targets during
# preprocessing (cf. the commented-out scaler lines above).
real_data = np.exp(real_data)
predicted = np.exp(predicted)
Code Example #4
import numpy as np
from scipy.sparse import hstack, vstack  # sparse-aware stacking, assuming the dummies are sparse
from sklearn.cluster import MiniBatchKMeans
from sklearn.preprocessing import scale

# Dataset is a project-local loader; its import is not shown in the snippet.

gamma = 1.0  # width of the RBF applied to the cluster distances below

print("Loading data...")

train_num = Dataset.load_part('train', 'numeric')
train_cat = Dataset.load_part('train', 'categorical_dummy')

test_num = Dataset.load_part('test', 'numeric')
test_cat = Dataset.load_part('test', 'categorical_dummy')

print("Combining data...")
# vstack concatenates row-wise, hstack column-wise;
# after concatenating, cluster the combined matrix with k-means.

all_data = hstack((
    scale(vstack((train_num, test_num)).astype(np.float64)).astype(np.float32),
    vstack((train_cat, test_cat)),
))

for n_clusters in [25, 50, 75, 100, 200]:
    part_name = 'cluster_rbf_%d' % n_clusters

    print("Finding %d clusters..." % n_clusters)

    kmeans = MiniBatchKMeans(n_clusters, random_state=17 * n_clusters + 11, n_init=5)
    kmeans.fit(all_data)

    print("Transforming data...")

    # transform() returns each sample's distance to every cluster center;
    # the RBF turns those distances into similarity features.
    cluster_rbf = np.exp(- gamma * kmeans.transform(all_data))

    print("Saving...")
Code Example #5
from typing import List, Tuple

import numpy as np

# Assemble the block matrix A and right-hand side B; the blocks aa, ab, ba, bb
# and the sizes a, b, n come from earlier context not shown in the snippet.
al1 = np.ones((a, 1))
al2 = np.zeros((a, 1))
bl1 = np.zeros((b, 1))
bl2 = np.ones((b, 1))
l1a = al1.T
l1b = bl1.T
l2a = al2.T
l2b = bl2.T
ll = np.zeros((2, 2))
abB = np.zeros((a + b, 1))
lB = np.ones((2, 1))
aA = np.hstack((aa, ab, al1, al2))
bA = np.hstack((ba, bb, bl1, bl2))
l1A = np.hstack((l1a, l1b))
l2A = np.hstack((l2a, l2b))
lA = np.hstack((np.vstack((l1A, l2A)), ll))
A = np.vstack((aA, bA, lA))
B = np.vstack((abB, lB))

print("A,B =")
print(np.hstack((A, B)))

m = np.zeros(a + b + n, dtype=bool)  # mask over the a + b + n variables
m[-2] = True
m[-1] = True

# TODO: check that the stationary point is a minimum

v: List[Tuple] = []
while increment(m, 0, a):
    while increment(m, a, a + b):
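
# The snippet ends mid-loop and the increment helper is not shown. A minimal
# sketch consistent with its use above: treat m[lo:hi] as a binary counter,
# advance it one step, and return False once it wraps (all subsets visited).
# This is an assumption about the intended behavior, not the original code.
def increment(m, lo, hi):
    for i in range(lo, hi):
        if not m[i]:
            m[i] = True   # carry stops at the first clear bit
            return True
        m[i] = False      # clear and carry into the next bit
    return False          # counter wrapped: every combination enumerated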