예제 #1
0
    # Train model
    for i in range(num_iters):
        batch_x = data_generator.next()
        _, l = sess.run([optimizer, loss], feed_dict={X: batch_x})
        if i % show_every == 0:
            print("Training loss (MSE) : %.4f" % (l))

    # Test model and report MAE
    test_data_pred = sess.run(decoder_op, feed_dict={X: test_data})
    return mae(test_data, test_data_pred)


# In[29]:

# Load the MovieLens files: movie metadata, the five pre-split
# train/test rating folds (u1.base/u1.test .. u5.base/u5.test),
# then the user metadata.
movies = parser.load_data(dd + "u.item", parser.Movie, '|')

ratings = [
    [
        parser.load_data(dd + "u" + str(fold) + ".base", parser.Rating, '\t'),
        parser.load_data(dd + "u" + str(fold) + ".test", parser.Rating, '\t'),
    ]
    for fold in range(1, 6)
]

users = parser.load_data(dd + "u.user", parser.User, '|')

# Hidden-layer sizes to sweep; per-size errors are accumulated below.
hidden = [10, 20, 40, 80, 100, 200, 300, 400, 500]
hidden_errors = []

for num_hidden in hidden:
    errors = []
    for fold in ratings:
예제 #2
0
import requests
from flask import Flask, render_template, request
from parser import load_data
from helpers import get_static_image

app = Flask(__name__)

# Load the dataset once at import time; route handlers below read this
# module-level list.
data = load_data()

@app.route('/')
def index():
	"""Serve the landing page."""
	template_name = 'index.html'
	return render_template(template_name)

@app.route('/sort')
def render_alphabetical():
	"""Render all entries sorted alphabetically by their 'name' key.

	Bug fix: the original did `sortedData = data` (an alias, not a copy)
	and then called `.sort()` on it, permanently reordering the shared
	module-level `data` list for every other route. `sorted()` builds a
	new list and leaves the global untouched.
	"""
	sorted_data = sorted(data, key=lambda entry: entry['name'])
	return render_template('postcodes.html', data=sorted_data)


@app.route('/maps')
def extract_postcodes():
	postcodes = [_['postcode'] for _ in data]
	query = {"postcodes": postcodes}

	# Bulk request of postcode data
	r = requests.post('http://postcodes.io/postcodes', query)

	if r.status_code == 200:
		result = r.json()['result']
예제 #3
0
# coding: utf-8

# In[ ]:


import keras 
import numpy as np
from parser import load_data 


# In[ ]:


# Load the training and validation image sets (paths relative to the
# working directory). NOTE(review): load_data comes from the project-local
# `parser` module — its return type is not visible here.
training_data = load_data('Data/training')
validation_data = load_data('Data/validation')


# In[ ]:


# Two conv/relu/pool stages.
# Bug fix: both Convolution2D calls were missing the comma before
# `input_shape` — a SyntaxError in the original.
# NOTE(review): `Sequential`, `Convolution2D`, `Activation`, `MaxPooling2D`,
# `img_width` and `img_height` are never defined/imported above (only
# `import keras`); confirm the missing imports elsewhere in the project.
model = Sequential()

model.add(Convolution2D(32, 3, 3, input_shape=(img_width, img_height, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

# input_shape is only meaningful on the first layer; Keras ignores it here.
model.add(Convolution2D(32, 3, 3, input_shape=(img_width, img_height, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
예제 #4
0
import keras
import numpy as np
from parser import load_data
#---------------------------------------

import os

# Load the training/validation sets via the project-local load_data.
# NOTE(review): these literals contain backslashes; '\s' happens not to be
# a recognised escape so the values survive intact today, but raw strings
# (or forward slashes) would be safer — confirm the intended paths.
training_data = load_data('data\smalltrain')
validation_data = load_data('data\smallvalid')

# Build ONE sequential CNN: three conv/relu/pool stages, then a dense
# sigmoid head for binary classification.
# Bug fixes vs. the original:
#  * `sequential()` -> `Sequential()` (the Keras class; lowercase is a
#    NameError).
#  * `model = sequential()` was repeated before each conv stage, throwing
#    away the layers added so far; a single model is built instead.
#  * `pool_size(2, 2)` -> `pool_size=(2, 2)` (keyword argument, not a call).
#  * the first Convolution2D was missing its kernel-height argument
#    (aligned with the 3x3 kernels used everywhere else in this file).
model = Sequential()
model.add(Convolution2D(32, 3, 3, input_shape=(img_width, img_height, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Convolution2D(32, 3, 3))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Convolution2D(64, 3, 3))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('sigmoid'))
예제 #5
0
"""
import numpy as np
import tensorflow as tf
import time
from parser import FLAGS, load_data, build_embed
from srn import HLSTM
from srn_tool import train_srn, evaluate_srn, inference_srn
from pn import PolicyGradient
from pn_tool import pretrain_pn, train_pn, develop_pn, evaluate_pn, inference_pn

config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
    if FLAGS.log_parameters:
        print(FLAGS.__flags)
    label_train, text_train, sentence_len_train, keyword_train = load_data(
        FLAGS.data_dir, FLAGS.train_filename)
    label_dev, text_dev, sentence_len_dev, keyword_dev = load_data(
        FLAGS.data_dir, FLAGS.valid_filename)
    label_test, text_test, sentence_len_test, keyword_test = load_data(
        FLAGS.data_dir, FLAGS.test_filename)
    embed = build_embed(FLAGS.data_dir, FLAGS.word_vector_filename)

    SRN_graph = tf.Graph()
    PN_graph = tf.Graph()

    with SRN_graph.as_default():
        SRN = HLSTM(FLAGS.symbols, FLAGS.embed_units, FLAGS.hidden_units,
                    FLAGS.labels, embed, FLAGS.learning_rate_srn)
        if FLAGS.log_parameters:
            SRN.print_parameters()
        init_srn = tf.global_variables_initializer()
# Machine-Learning  classification of apple logo and apple fruit

from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.utils import np_utils
from keras.datasets import mnist
import numpy as np
from parser import load_data

# Load the image datasets (paths relative to the working directory).
training_data = load_data('data/training')
validate_data = load_data('data/validation')

# Three conv/relu/pool stages followed by a dense head.
# Bug fix: `Flattern()` -> `Flatten()` — `Flatten` is what is imported
# from keras.layers above; `Flattern` is a NameError.
# NOTE(review): img_width / img_height are never defined in this file, and
# the final Activation('sigmoid') present in the sibling snippets is
# missing here — confirm both.
model = Sequential()
model.add(Convolution2D(32, 3, 3, input_shape=(img_width, img_height, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Convolution2D(32, 3, 3))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Convolution2D(64, 3, 3))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(1))
import keras
import numpy as np
from parser import load_data
#---------------------------------------

import os

# Load the training/validation sets. NOTE(review): '\s' is not a recognised
# escape so these literals survive intact, but raw strings are safer.
training_data = load_data(r'data\smalltrain')
validation_data = load_data(r'data\smallvalid')

# Build ONE sequential CNN, then the dense head.
# Bug fixes vs. the original:
#  * `sequential()` -> `Sequential()` (lowercase is a NameError).
#  * `model = sequential()` repeated before each conv stage discarded the
#    previously added layers; a single model is built instead.
#  * `pool_size(2,2)` -> `pool_size=(2, 2)` (keyword argument, not a call).
#  * first Convolution2D was missing its kernel-height argument.
model = Sequential()
model.add(Convolution2D(32, 3, 3, input_shape=(img_width, img_height, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Convolution2D(32, 3, 3))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Convolution2D(64, 3, 3))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(1))
예제 #8
0
# tutorial posted by Siraj Raval named 'How to Make an Image Classifier - Intro to Deep Learning #6'.
# An accompanying video is available at https://www.youtube.com/watch?v=cAICT4Al5Ow.
# Created: 22/04/18

# TODO: 

# Import the keras and numpy modules
import keras
import numpy as np

# Import...
from parser import load_data

# Load the training and testing data from their respective directories within
# the server
# NOTE(review): both calls pass an empty path — the data directories were
# never filled in (see the TODO above); confirm the intended locations
# before running.
training_data = load_data('')
validation_data = load_data('')

# Create a sequential model (simpler than a standard graph model) that has three
# layers. Each layer will have a convolutional 2d filter that will filter the
# input images (that consist of three layers, namely RGB) and output the probability
# that the target image is classified as belonging to the same class as the training
# data (namely the input file of faces). We then sequentially pass the output of the
# convolutional layer into a rectified linear unit (relu) activation layer which
# increases the non-linear properties of the model, enabling it to recognise
# non-linear functions (i.e. more complex functions than linear regression). Finally
# the feature map of the relu activation layer is passed (again sequentially) to the
# pooling function so that the
# First conv stage of the model described in the comment above.
# Bug fixes: `Convolutional2D` is not a Keras class (the layer is
# `Convolution2D`), and the comma before `input_shape` was missing — both
# made the original fail immediately.
model = Sequential()
model.add(Convolution2D(32, 3, 3, input_shape=(img_width, img_height, 3)))
model.add(Activation('relu'))
예제 #9
0
def generate_data(train_dir, use):
  """Build an augmented copy of the training set found in *train_dir*.

  ``use`` is tested with ``in``, so any container of augmentation names
  works. Recognised names: "default" (keep the original image), "shift",
  "rotate", "morphology", "shear", "noise", "dilation+Crop_UP_DOWN".
  Each recognised name appends one family of transformed copies of every
  28x28x1 training image (and its label) to the output.

  Returns a pair (images, labels) of numpy arrays, or None (bare
  ``return``) as soon as an image is not shaped (28, 28, 1).
  """
  # Offset passed to dectect_boundary for the shift augmentations below.
  shift = 2
  result = load_data(train_dir)
  train_images = result['train_images']
  # NOTE(review): train_ids is loaded but never used below — confirm it
  # can be dropped.
  train_ids =  result['train_ids']
  train_labels = result['train_labels']
  total = train_images.shape[0]
  #morphology = {"erosion":([1,2],erosion),
  #"dilation":([1,2,3,4],dilation)
  # Morphological augmentations: footprint sizes paired with the operation.
  morphology = {"erosion":([2],erosion),
  "dilation":([3],dilation)
  }
  shear_size=20
  generate_images = []
  generate_labels = []
  #actions  = [up, down, left, down, upper_left, upper_right, lower_left, lower_right]
  # NOTE(review): xrange means this function targets Python 2.
  for i in xrange(total):
    image = train_images[i]
    label = train_labels[i]

    # put original data
    if "default" in use:
      generate_images.append(image)
      generate_labels.append(label)

    # Abort (returning None) on any unexpectedly shaped image; callers
    # must handle the None case.
    if image.shape != (28, 28, 1):
      logger.info("error shape")
      return
    b = image.reshape(28, 28)
    # Presumably the glyph's distances to the four image edges — verify
    # against dectect_boundary's definition.
    b_T, b_B, b_L, b_R = dectect_boundary(b,0,shift)
    # Each shift action is paired with the boundary value(s) it needs.
    actions  = [(up,[b_T]),
              (down,[b_B]),
              (left,[b_L]),
              (right,[b_R]),]
              #(upper_left,[b_T,b_L]),
              #(upper_right,[b_T,b_R]),
              #(lower_left,[b_B,b_L]),
              #(lower_right,[b_B,b_R])]
    if "shift" in use:
      for action,args in actions:
        t =  action(*tuple([b]+args))
        t = t.reshape(28,28,1)
        generate_images.append(t)
        generate_labels.append(label)

    if "rotate" in use:
      #for angle in [-20,-15,-10,-5,5,10,15,20]:
      for angle in [-15,15]:
        t = rotate(b, angle)
        t = t.reshape(28,28,1)
        generate_images.append(t)
        generate_labels.append(label)

    if "morphology" in use:
      for fps, method in morphology.values():
        for fp in fps :
          size, cross = draw_circle(fp)
          t = method(b,size,cross)
          t = t.reshape(28,28,1)
          generate_images.append(t)
          generate_labels.append(label)

    if "shear" in use:
      y,x=b.shape
      for site in ["v1","v2","h1","h2"]:
        t = shear(b,shear_size,site,y,x)
        t = t.reshape(28,28,1)
        generate_images.append(t)
        generate_labels.append(label)

    if "noise" in use:
      t =  noise(b)
      t = t.reshape(28,28,1)
      generate_images.append(t)
      generate_labels.append(label)

    if "dilation+Crop_UP_DOWN" in use:
      size, cross = draw_circle(3)
      t = dilation(b,size,cross)
      # NOTE(review): this dectect_boundary call omits the shift argument
      # the earlier call passes — confirm both signatures are valid.
      t_T, t_B, t_L, t_R = dectect_boundary(t,0)
      t = Crop_UP_DOWN(t,t_T+3, t_B+3)
      t = t.reshape(28,28,1)
      generate_images.append(t)
      generate_labels.append(label)

  generate_images = numpy.array(generate_images)
  generate_labels = numpy.array(generate_labels)
  return generate_images, generate_labels
예제 #10
0
File: catsvsdogs.py    Project: prashplus/ML
import keras #machine learning
import numpy as np #math
from parser import load_data #data loading

#Step 1: Collect data
# Bug fix: 'data\training' and 'data\validation' contain the escape
# sequences \t (tab) and \v (vertical tab), so the paths were silently
# corrupted to "data<TAB>raining" / "data<VT>alidation". Raw strings keep
# the intended backslash.
training_data = load_data(r'data\training')
validation_data = load_data(r'data\validation')

#Step 2: Build model
# Bug fixes: missing comma before `input_shape` on the first conv, and
# unclosed parentheses on the second and third Convolution2D calls — all
# SyntaxErrors in the original.
model = Sequential()

model.add(Convolution2D(32, 3, 3, input_shape=(img_width, img_height, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Convolution2D(32, 3, 3))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Convolution2D(64, 3, 3))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('sigmoid'))

model.compile(loss='binary_crossentropy',