def main():
    # If the training and test sets aren't stored locally, download them.
    if not os.path.exists(IRIS_TRAINING):
        raw = urlopen(IRIS_TRAINING_URL).read()
        with open(IRIS_TRAINING, "wb") as f:
            f.write(raw)

    if not os.path.exists(IRIS_TEST):
        raw = urlopen(IRIS_TEST_URL).read()
        with open(IRIS_TEST, "wb") as f:
            f.write(raw)

    # Load datasets.
    training_set = load_csv_with_header(filename=IRIS_TRAINING,
                                        target_dtype=np.int,
                                        features_dtype=np.float32)
    test_set = load_csv_with_header(filename=IRIS_TEST,
                                    target_dtype=np.int,
                                    features_dtype=np.float32)

    # Specify that all features have real-value data
    feature_columns = [real_valued_column("", dimension=4)]

    # Build 3 layer DNN with 10, 20, 10 units respectively.
    classifier = DNNClassifier(feature_columns=feature_columns,
                               hidden_units=[10, 20, 10],
                               n_classes=3,
                               model_dir="/tmp/iris_model")

    # Define the training inputs
    def get_train_inputs():
        x = tf.constant(training_set.data)
        y = tf.constant(training_set.target)

        return x, y

    # Fit model.
    classifier.fit(input_fn=get_train_inputs, steps=2000)

    # Define the test inputs
    def get_test_inputs():
        x = tf.constant(test_set.data)
        y = tf.constant(test_set.target)

        return x, y

    # Evaluate accuracy.
    accuracy_score = classifier.evaluate(input_fn=get_test_inputs,
                                         steps=1)["accuracy"]

    print("\nTest Accuracy: {0:f}\n".format(accuracy_score))

    # Classify two new flower samples.
    def new_samples():
        return np.array([[6.4, 3.2, 4.5, 1.5], [5.8, 3.1, 5.0, 1.7]],
                        dtype=np.float32)

    predictions = list(classifier.predict(input_fn=new_samples))

    print("New Samples, Class Predictions:    {}\n".format(predictions))
Example No. 2
def main():
    training_set = tf.contrib.learn.datasets.base.load_csv_with_header(
        filename='./iris_data/iris_training.csv',
        target_dtype=np.int,
        features_dtype=np.float32)
    test_set = tf.contrib.learn.datasets.base.load_csv_with_header(
        filename='./iris_data/iris_test.csv',
        target_dtype=np.int,
        features_dtype=np.float32)

    feature_columns = [tf.feature_column.numeric_column("x", shape=[4])]

    clf = DNNClassifier(hidden_units=[10, 20, 10],
                        feature_columns=feature_columns,
                        model_dir='./iris_model',
                        n_classes=3)

    train_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={"x": np.array(training_set.data)},
        y=np.array(training_set.target),
        num_epochs=None,
        shuffle=True)

    clf.fit(input_fn=train_input_fn, steps=2000)

    test_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={"x": np.array(test_set.data)},
        y=np.array(test_set.target),
        num_epochs=1,
        shuffle=False)

    accuracy_score = clf.evaluate(input_fn=test_input_fn)["accuracy"]
    print("\nTest Accuracy: {0:f}\n".format(accuracy_score))

    new_samples = np.array([[6.4, 3.2, 4.5, 1.5], [5.8, 3.1, 5.0, 1.7]],
                           dtype=np.float32)
    predict_input_fn = tf.estimator.inputs.numpy_input_fn(x={"x": new_samples},
                                                          num_epochs=1,
                                                          shuffle=False)
    predictions = list(clf.predict(input_fn=predict_input_fn))
    print(predictions)

    print("New Samples, Class Predictions:    {}\n".format(predictions))
Example No. 3
class DNN(BaseEstimator, ClassifierMixin):
    def __init__(self,
                 n_classes,
                 type="w2v",
                 hidden_units=[10, 20, 10],
                 num_features=100,
                 context=10,
                 method=1):
        # NOTE: pre_transformX's "w2v" branch expects self.model to be set, e.g.:
        # if type == "w2v":
        #     self.model = w2v_helpers.get_word2vec(num_features, context, method)
        self.type = type
        self.classifier = DNNClassifier(hidden_units=hidden_units,
                                        n_classes=n_classes)

    def pre_transformX(self, df, colnames, df_test=None, n_gram=None):
        data = None
        if self.type == "w2v":
            data = features_helpers.create_sentences(df, colnames)
            data = features_helpers.transform_to_w2v_sentences(
                data, self.model)
            return data.as_matrix()
        else:
            x_train, x_test = features_helpers.transform_to_bow(
                df, df_test, colnames, n_gram)
            return x_train, x_test

    def pre_transformY(self, df, list_dict):
        y = list(map(lambda w: list_dict.index(w), list(df)))
        return np.array(y)

    def fit(self, X, y=None):
        self.classifier.fit(x=X, y=y, steps=200)

    def predict(self, X, y=None):
        return self.classifier.predict(X)

    def evaluate(self, X, Y):
        return self.classifier.evaluate(x=X, y=Y)["accuracy"]

    def score(self, X, y, sample_weight=None):
        return super(DNN, self).score(X, y, sample_weight)
def dnn_main():
    x_train, x_test, y_train, y_test = load_SpamBase(
        "../data/spambase/spambase.data")
    feature_columns = infer_real_valued_columns_from_input(x_train)
    print(feature_columns)
    # hidden_units=[30, 10] means two hidden layers, with 30 and 10 units respectively.
    classifier = DNNClassifier(feature_columns=feature_columns,
                               hidden_units=[30, 10],
                               n_classes=2)
    # steps=500 means training runs for 500 batches; batch_size=10 means each batch contains 10 training examples.
    # An epoch is one pass over the entire dataset; within a single epoch the parameters may be updated several times. epoch_num is the specified number of epochs.
    # A step (one iteration) is a single parameter update, computed from batch_size examples.
    # Note: with the same dataset and the same number of epochs, the parameter updates are not necessarily the same; they depend on batch_size.
    # The total number of iterations (steps) is (total_examples / batch_size + 1) * epoch_num.
    # Each epoch shuffles the data, reordering the inputs and splitting them into different batches.
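    # For example (illustrative numbers only): with 1,000 training rows and
    # batch_size=10, one epoch is about 100 steps, so steps=500 corresponds to
    # roughly 5 epochs, and 500 * 10 = 5,000 examples are seen in total.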

    classifier.fit(x_train, y_train, steps=500, batch_size=10)
    y_predict = list(classifier.predict(x_test, as_iterable=True))
    #y_predict = classifier.predict(x_test)
    #print y_predict
    score = metrics.accuracy_score(y_test, y_predict)
    print('Accuracy: {0:f}'.format(score))
Example No. 5
print('============================================================')
for classifier, acc, cv_acc in results:
    print(
        'Classifier = {}: Accuracy = {} || Mean Cross Val Accuracy scores = {}'
        .format(classifier, acc, cv_acc))

for name, bp in bestparams:
    print('============================================================')
    print('{}-classifier GridSearch Best Params'.format(name))
    print('============================================================')
    display(bp)
print()
print()

feature_columns = [
    tf.contrib.layers.real_valued_column("", dimension=len(X[0]))
]
dl_clf = DNNClassifier(hidden_units=[10, 20, 10],
                       n_classes=2,
                       feature_columns=feature_columns,
                       model_dir="/tmp/ilpd")
dl_clf.fit(X_train, y_train, steps=4000)
predictions = list(dl_clf.predict(X_test, as_iterable=True))
acc = accuracy_score(y_test, predictions)
print('============================================================')
print('Classifier = {}: Accuracy = {} '.format(DNNClassifier, acc))
print('============================================================')
print('{}-classifier GridSearch Best Params'.format(DNNClassifier))
display(dl_clf.params)
print('============================================================')

results, dataframes, best_parameters = parameter_tuning(models, X_train, X_test, y_train, y_test)
print()
print('============================================================')
for classifier, acc, cv_acc in results:
    print('{}: Accuracy with Best Parameters = {}% || Mean Cross Validation Accuracy = {}%'.format(classifier, round(acc*100,4), round(cv_acc*100,4)))
print()

for name, bp in best_parameters:
    print('============================================================')
    print('{} classifier GridSearch Best Parameters'.format(name))
    display(bp)
print()
print()

# Deep Learning using Tensor flow
feature_columns = [tf.contrib.layers.real_valued_column("", dimension=len(X[0]))]
deep_learning = DNNClassifier(hidden_units=[10, 20, 10],
                              feature_columns=feature_columns,
                              model_dir="/tmp/iris")
deep_learning.fit(X_train, y_train, steps=1500)
predictions = list(deep_learning.predict(X_test, as_iterable=True))
acc = accuracy_score(y_test, predictions)
print('============================================================')
print('Deep Learning classifier Accuracy = ', round(acc*100,4),'%')
print('------------------------------------------------------------')
print('Deep Learning classifier Best Parameters')
display(deep_learning.params)
print('***************** Execution Completed **********************')
print('------------------------------------------------------------')
Example No. 7
                        type=str)
    parse = parser.parse_args()
    TRAIN_DATASET = parse.train
    TEST_DATASET = parse.test
    OUTPUT_PATH = parse.output
    np.random.seed(19260817)

    train_set = pandas.read_csv(TRAIN_DATASET)
    test_set = pandas.read_csv(TEST_DATASET)
    encoder = LabelEncoder().fit(train_set["species"])
    train = train_set.drop(["species", "id"], axis=1).values
    label = encoder.transform(train_set["species"])
    test = test_set.drop(["id"], axis=1).values
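    # Standardize the features. Note: as written, the test set is scaled with a
    # scaler fitted on the test data itself; the more common pattern is to reuse
    # the scaler fitted on the training data (i.e. fit once on train, then
    # scaler.transform(test)).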
    scaler = StandardScaler().fit(train)
    train = scaler.transform(train)
    scaler = StandardScaler().fit(test)
    test = scaler.transform(test)

    feature_columns = [real_valued_column("", dimension=192)]
    classifier = DNNClassifier(feature_columns=feature_columns,
                               n_classes=99,
                               hidden_units=[1024, 512, 256],
                               optimizer=tf.train.AdamOptimizer)
    classifier.fit(x=train, y=label, steps=1000)
    output = classifier.predict(test)
    output_prob = classifier.predict_proba(test)
    test_id = test_set.pop("id")
    result = pandas.DataFrame(output_prob,
                              index=test_id,
                              columns=encoder.classes_)
    result.to_csv(OUTPUT_PATH)
Example No. 8
    l.remove(l[0])
    l = np.array(l)
    labels = l[:, :1]
    data = l[:, 1:]
    return to_int(data), formalize(to_int(labels), 10)


def load_test_data():
    l = []
    with open("test.csv") as f:
        lines = csv.reader(f)
        for line in lines:
            l.append(line)
    l.remove(l[0])
    return to_int(l)


train_images, train_labels = load_train_data()
test_images = load_test_data()
print(train_images[0])

feature_columns = infer_real_valued_columns_from_input(train_images)
clf = DNNClassifier([100], feature_columns, n_classes=10)
print(train_images.shape)
print(train_labels.shape)
clf.fit(train_images, train_labels)
print("done training")

pred = clf.predict(test_images[:1])  # keep the input 2-D: predict on the first test row
print(pred)
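The last call predicts on a single row; for a full run one would usually predict over the whole test matrix and write the labels out. A minimal sketch (the output file name and ImageId/Label headers are assumptions, e.g. for a Kaggle-style digit task):

# Predict every test row and write the class labels to a CSV
# (file name and column headers are assumptions, not from the original snippet).
all_preds = list(clf.predict(test_images, as_iterable=True))
with open("submission.csv", "w") as out:
    out.write("ImageId,Label\n")
    for i, label in enumerate(all_preds, start=1):
        out.write("{},{}\n".format(i, int(label)))
print("wrote {} predictions".format(len(all_preds)))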