# Esempio n. 1 (Example no. 1)
# Report dataset dimensions. N, p and Nc are defined earlier in the file
# (presumably extracted from a dataset-info helper -- not visible in this chunk).
print('Samples: ' + str(N) + ' Attributes: ' + str(p) + ' Classes: ' + str(Nc))

##################### 3. Pre-process labels

# Encode the class labels for training. label_type='bipolar' presumably maps
# classes to -1/+1 targets -- confirm against class_label_encode's definition.
Y = class_label_encode(Y, Nc, label_type='bipolar')

##################### 4. Split Data Between Train and Test

# Random hold-out split: 80% train / 20% test, seeded (random_state=1) so the
# split is reproducible across runs.
Xtr_raw, y_tr, Xts_raw, y_ts = hold_out.random_subsampling(X=X_raw,
                                                           Y=Y,
                                                           train_size=0.8,
                                                           random_state=1)
##################### 5. Normalize data

# Z-score normalization. Note that the test set and the full set are both
# scaled with X_ref=Xtr_raw, i.e. using the TRAINING data's statistics --
# the standard way to avoid train/test information leakage.
X_tr = normalize(Xtr_raw, norm_type='z_score')
X_ts = normalize(Xts_raw, norm_type='z_score', X_ref=Xtr_raw)
X = normalize(X_raw, norm_type='z_score', X_ref=Xtr_raw)

# Convert inputs/targets to the layout the model expects. in_row=True
# presumably declares that samples are stored as rows in the input arrays;
# bias=False skips appending a bias term -- verify against preprocess_input.
X_tr2 = preprocess_input(X_tr, in_row=True, bias=False)
y_tr2 = preprocess_output(y_tr, in_row=True)

# NOTE(review): this rebinds N from "total number of samples" (printed above)
# to X_tr2.shape[1], which looks like the TRAINING sample count after
# preprocessing -- confirm the rebinding is intentional.
N = X_tr2.shape[1]

X_ts2 = preprocess_input(X_ts, in_row=True, bias=False)
y_ts2 = preprocess_output(y_ts, in_row=True)

# # Verify data pattern

# print('Training dataset:')
# print(X_tr.shape)
# Esempio n. 2 (Example no. 2)
# 1. Load dataset

# Project helper that returns the raw feature matrix and the label array
# for the Iris dataset.
X_raw, Y = classification_data_loader.load_dataset(dataset='iris')

# 2. Get dataset's info (number of samples, attributes, classes)

ds_info = classification_data_loader.extract_info(X_raw, Y)
N = ds_info['n_samples']    # number of samples
p = ds_info['n_inputs']     # number of input attributes
Nc = ds_info['n_outputs']   # number of classes

print('Samples: ' + str(N) + ' Attributes: ' + str(p) + ' Classes: ' + str(Nc))

# 5. Normalize data

# Z-score normalization. Here X_ref=X_raw, so the statistics come from the
# full dataset (no train/test split is performed in this example).
X_norm = normalize(X_raw, norm_type='z_score', X_ref=X_raw)

# 6. Build Model

Nk = 5  # Define number of prototypes

# Unsupervised winner-takes-all clustering: learn Nk prototypes from the
# normalized data, then assign each sample to a prototype.
wtaCluster = WinnerTakesAllClustering(Nprot=Nk)
wtaCluster.fit(X_norm)
indices = wtaCluster.predict(X_norm)

# Supervised winner-takes-all classifier trained on the same data.
# NOTE(review): wtaClass is fitted but never used below in this chunk --
# confirm whether its predictions were meant to be reported too.
wtaClass = WinnerTakesAllClassifier(Nprot=Nk)
wtaClass.fit(X_norm, Y)

# 7. Visualize

# Shape of the cluster-assignment array -- presumably one index per sample.
print(indices.shape)