#
#    Platform specific calls used by the add-on.

import os
import shlex
import subprocess
import sys
import xbmc
import xbmcgui
import xbmcvfs
import xbmcaddon
from utility import debugTrace, errorTrace, infoTrace, newPrint, infoPrint, enum, getID, isCustom, getCustom
from sys import platform

# **** ADD MORE PLATFORMS HERE ****
platforms = enum(UNKNOWN=0, WINDOWS=1, LINUX=2, RPI=3, ANDROID=4, MAC=5)
platforms_str = ("Unknown", "Windows", "Linux, openvpn installed",
                 "Linux, openvpn plugin", "Android", "Apple")
fake_name = "FAKECONNECTION.txt"
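

# utility.enum is used across these examples with keyword arguments (as with
# platforms above) and positional names (as in the later examples). Its
# implementation is not shown on this page; a minimal sketch of such a helper
# -- an assumption, kept under a different name so it does not shadow the
# imported utility.enum -- could look like this:
def _enum_sketch(*sequential, **named):
    # Positional names get ascending integer values starting at 0;
    # keyword arguments keep the values they were given.
    values = dict(zip(sequential, range(len(sequential))), **named)
    return type('Enum', (), values)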


def fakeConnection():
    # Return True to fake out any calls to openVPN to change the network.
    # This is governed by the existence of 'FAKECONNECTION.txt' in the userdata directory.
    return xbmcvfs.exists(getUserDataPath(fake_name))
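

def _run_openvpn_sketch(command):
    # Hypothetical helper (an assumption, not part of the original add-on):
    # a sketch of how a caller might honour the fake flag before shelling
    # out to openvpn.
    if fakeConnection():
        xbmc.log("Faking openvpn call: " + command)
        return 0
    return subprocess.call(shlex.split(command))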


def fakeItTillYouMakeIt(fake):
    try:
        if fake:
            if not fakeConnection():
Example #2
CHAR_INFO_WIDTH = 50
#INFO_POPUP_WIDTH = 30

LIMIT_FPS = 20  # 20 frames-per-second maximum

# size of the map
MAP_WIDTH = 80
MAP_HEIGHT = 43


def exp_xp_curve(level):
    # exponential
    from math import exp
    return exp(level) * 5

# character generation values
DEFAULT_SPEED = 10
DEFAULT_XP_CURVE = exp_xp_curve
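
# A quick sanity check of the curve (hypothetical, not part of the original
# file): the XP needed grows by a factor of e (~2.7) per level.
if __name__ == '__main__':
    for level in (1, 2, 3):
        print("level %d needs ~%.0f xp" % (level, DEFAULT_XP_CURVE(level)))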

AIR = 0
WATER = 1
EARTH = 2
FIRE = 3

aspects = ["AIR", "WATER", "EARTH", "FIRE"]  # clockwise from air
# (hot_cold, wet_dry), where hot and wet are positive
factors = [(1,1), (1,-1), (-1,-1), (-1,1)]

tile_types = enum("CAVE_FLOOR", "CAVE_WALL", "GRASS",
                  "SAND", "SHALLOW_WATER", "DEEP_WATER")
Example #3
import cmd

import gevent

import config
from config import MSG, DBQUERY
import network
from utility import enum
import partition

STATE = enum('WAITING_FOR_DATABASE', 
			 'WAITING_FOR_SERVERS', 
			 'ACCEPTING_PLAYERS')

class System:
	def __init__(self, system_id=-1, server=None, connections=None):
		self.id = system_id
		self.server = server
		self.connections = connections if connections is not None else []

class Server:
	def __init__(self, server_id=-1, server_socket=None, ip="", client_port=-1, 
				 server_port=-1, systems=None, neighbours=None):
		self.id = server_id
		self.socket = server_socket
		self.ip = ip
		self.client_port = client_port # address for clients to connect on
		self.server_port = server_port # address for servers to connect on
		self.systems = systems if systems is not None else [] # systems we're responsible for
		self.edge_systems = {}
		self.neighbours = neighbours if neighbours is not None else [] # neighbouring servers
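
# Hypothetical usage sketch (not from the original file): the
# "x if x is not None else []" pattern above avoids the shared
# mutable-default pitfall, so every Server/System gets its own lists.
if __name__ == '__main__':
	a = Server(server_id=1, ip="10.0.0.1", client_port=4000, server_port=4001)
	b = Server(server_id=2, ip="10.0.0.2", client_port=4000, server_port=4001)
	a.systems.append(System(system_id=7, server=a))
	assert b.systems == []  # b keeps its own, still-empty systems list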
Example #4
#    Platform specific calls used by VPN Manager for OpenVPN add-on.

import os
import shlex
import subprocess
import sys
import xbmc
import xbmcgui
import xbmcvfs
import xbmcaddon
from utility import debugTrace, errorTrace, infoTrace, newPrint, infoPrint, enum
from sys import platform


# **** ADD MORE PLATFORMS HERE ****
platforms = enum(UNKNOWN=0, WINDOWS=1, LINUX=2, RPI=3, ANDROID=4, MAC=5)  
platforms_str = ("Unknown", "Windows", "Linux, openvpn installed", "Linux, openvpn plugin", "Android", "Apple")
fake_name = "FAKECONNECTION.txt"


def fakeConnection():
    # Return True to fake out any calls to openVPN to change the network.
    # This is governed by the existence of 'FAKECONNECTION.txt' in the userdata directory.
    return xbmcvfs.exists(getUserDataPath(fake_name))

    
def fakeItTillYouMakeIt(fake):
    try:
        if fake:
            if not fakeConnection():
                f = open(getUserDataPath(fake_name),'w')
Example #5
import controls
import data
import utility


ManifestType = utility.enum(
    FIELD_DESCRIPTION                       = 3,
    FIELD_DESCRIPTION_NAME                  = 'Field Description',
    KEY_VALUE_LIST                          = 44,
    KEY_VALUE_LIST_NAME                     = 'Key-Value pairs list',
    OPERATIONAL_STATE                       = 27,
    OPERATIONAL_STATE_NAME                  = 'Operational State',
    STANDARD_CONFIGURATION_CONTROLS         = 6,
    STANDARD_CONFIGURATION_CONTROLS_NAME    = 'Standard UI Controls',
    STANDARD_FR8_TERMINAL                   = 23,
    STANDARD_FR8_TERMINAL_NAME              = 'Standard Fr8 Terminal',
    STANDARD_PAYLOAD_DATA                   = 5,
    STANDARD_PAYLOAD_DATA_NAME              = 'Standard Payload Data'
)


class Manifest(object):
    def __init__(self, **kwargs):
        self.manifest_type = kwargs.get('manifest_type')
        self.manifest_type_name = kwargs.get('manifest_type_name')
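

# Hypothetical usage (not in the original source): a manifest type value is
# paired with its *_NAME entry from the ManifestType enum above.
example_manifest = Manifest(
    manifest_type=ManifestType.STANDARD_PAYLOAD_DATA,
    manifest_type_name=ManifestType.STANDARD_PAYLOAD_DATA_NAME
)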


class OperationalStateCM(Manifest):
    def __init__(self, **kwargs):
        super(OperationalStateCM, self).__init__(
            manifest_type=ManifestType.OPERATIONAL_STATE,
Example #6
from utility import enum

__doc__ = """Yeah, globals might be bad, but here's a file full of them anyway"""

target0 = 200
target1 = 824
target2 = 512

MOTORS = [1,2,3,4,5,6,7,8,9]

LEFT_FRONT = MOTORS[0]
RIGHT_FRONT = MOTORS[1]
LEFT_BACK = MOTORS[2]
RIGHT_BACK = MOTORS[3]

LEFT_FRONT_E = MOTORS[4]
RIGHT_FRONT_E = MOTORS[5]
LEFT_BACK_E = MOTORS[6]
RIGHT_BACK_E = MOTORS[7]

IR_X = 1
IR_Y = 2

COMMANDS = enum(
    GetSensorValue='GetSensorValue',
    GetMotorTargetPosition='GetMotorTargetPosition',
    GetMotorCurrentPosition='GetMotorCurrentPosition',
    SetMotorTargetPosition='SetMotorTargetPosition'
)
Example #7
import uuid
import manifests
import utility


def extract_fr8_crate_contents(contents, manifest_id):
    manifest_id_str = str(manifest_id)
    if manifest_id_str not in manifests.manifest_extractors:
        raise ValueError('Unsupported manifest_id, data extraction failed.')

    return manifests.manifest_extractors[manifest_id_str](contents)
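

if __name__ == '__main__':
    # Hypothetical registration and dispatch (only manifests.manifest_extractors
    # itself appears in the original code; the extractor below is an assumption):
    # extractors are keyed by the manifest id as a string.
    manifests.manifest_extractors['5'] = lambda contents: contents.get('payload')
    print(extract_fr8_crate_contents({'payload': []}, manifest_id=5))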


TerminalStatus = utility.enum(ACTIVE=1, INACTIVE=0)

AuthenticationType = utility.enum(NONE=1, INTERNAL=2, EXTERNAL=3, INTERNAL_WITH_DOMAIN=4)

ActivityType = utility.enum(STANDARD=1, LOOP=2, SOLUTION=3)

AvailabilityType = utility.enum(NOTSET=0, CONFIGURATION=1, RUNTIME=2, ALWAYS=3)


class ActivityDTO(object):
    def __init__(self, **kwargs):
        self.name = kwargs.get('name')
        self.label = kwargs.get('label')
        self.activity_template = kwargs.get('activity_template')
        self.root_plan_node_id = kwargs.get('root_plan_node_id')
        self.parent_plan_node_id = kwargs.get('parent_plan_node_id')
        self.current_view = kwargs.get('current_view')
        self.ordering = kwargs.get('ordering')
Example #8
# network values
SOCKET_SERVER_MAX_QUEUE = 512
SOCKET_CLIENT_MAX_QUEUE = 512
SOCKET_DATABASE_MAX_QUEUE = 512
SOCKET_RECV_MAX_BYTES = 4096

GATEWAY_SERVER_COUNT = 2

MSG_END_CHAR = chr(1)
MSG_TOP_DELIMITER = 2
NEGATIVE_CHAR = 126

MSG_TIME_ACCURACY = 3 # how many digits after decimal point

# database query types
DBQUERY = enum('GW_STARTINFO', 'GS_SYSTEMSINFO', 'GS_UPDATE', 'GW_NEWPLAYER')

# Message types
# Type ID format: '[SENDER]_[RECEIVER]_[DESCRIPTION]'
class MSG:
	OK = 0
	# CLIENT to GATEWAY	
	CL_GW_LOGIN = 1
	CL_GW_LOGOUT = 2	
	CL_GW_INPUT = 5
	CL_GW_REGISTER = 7
	# GATEWAY to CLIENT
	GW_CL_LOGIN_SUCCESSFUL = 3
	GW_CL_LOGIN_FAILED = 4
	GW_CL_FRAME = 6
	GW_CL_REGISTRATION_SUCCESSFUL = 8
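
# Hypothetical debugging aid (not in the original file): reverse lookup from a
# numeric message type back to its [SENDER]_[RECEIVER]_[DESCRIPTION] name.
MSG_NAMES = {value: name for name, value in vars(MSG).items()
             if not name.startswith('_')}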
Example #9
import math

from utility import enum, Rect

FRAME_RATE = 1.0/100 # length of a frame in seconds
DATABASE_FRAME_RATE = 4.0 # how often in seconds to commit to DB

PLAYER_SPEED = 100 # pixels/second
PLAYER_TURN_SPEED = math.radians(180) # radians/second
PLAYER_FIRE_RATE = 1.0/5 # per second

WORMHOLE_SIZE = 30
PROJECTILE_SIZE = 5
INTERACTION_AREA = 100

PROJECTILE_SPEED = 200 #pixels/second

INPUTS = enum('FORWARD', 'LEFT', 'RIGHT', 'FIRE', 'INTERACT')

PLAYER_SIZE = 5
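
# Hypothetical per-frame integration sketch (not from the original code),
# showing how the unit comments above combine: speeds are per second, so
# each tick scales them by FRAME_RATE.
def _advance_player(x, y, heading, turning, moving):
	if turning:
		heading += PLAYER_TURN_SPEED * FRAME_RATE
	if moving:
		x += PLAYER_SPEED * FRAME_RATE * math.cos(heading)
		y += PLAYER_SPEED * FRAME_RATE * math.sin(heading)
	return x, y, heading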

class Client:
	def __init__(self, client_socket=None, address=None, player=None):
		self.socket = client_socket
		self.address = address
		self.player = player

class OfflinePlayer:
	def __init__(self, player_id, name, x, y, solar_system):
		self.id = player_id
		self.name = name
		self.x = x
		self.y = y
Example #10
#coding=utf-8 
# ycat			 2014/09/28      create
import pytest
import json
import session
import utility
import sqlite3
import traceback
import sys,os,bottle,datetime

sex_type = utility.enum(Male=1,Female=0)

class ctrl_user_manager:
	@staticmethod
	def register(nick,pwd,sex,age):
		assert int(sex) <= 1
		age = int(age)
		try:
			now = utility.now()
			db = utility.get_db()
			c = db.cursor()
			c.execute("INSERT INTO u_user (NickName,Sex,Password,CreateDate,BirthdayYear,CertfState)VALUES(?,?,?,?,?,?)", 
				(nick,int(sex),pwd,now.strftime("%Y-%m-%d %H:%M:%S"),now.year - age,0))			
			db.commit()  # commit here, otherwise the user_id cannot be retrieved
			
			user = session.login(nick,pwd) 
			c.execute("INSERT INTO u_profile(ID,EditDate)VALUES(?,?)",(user.user_id,now))
			utility.write_log(user.user_id,"registration successful",1,False)
			db.commit()
			return user
Example #11
import data
import manifests
import utility


ControlTypes = utility.enum(
    DROP_DOWN_LIST      = 'DropDownList',
    TEXT_SOURCE         = 'TextSource'
)


class ControlEvent(object):
    def __init__(self, **kwargs):
        self.name = kwargs.get('name')
        self.handler = kwargs.get('handler')

    def to_fr8_json(self):
        return {
            'name': self.name,
            'handler': self.handler
        }

    @staticmethod
    def from_fr8_json(json_data):
        return ControlEvent(name=json_data.get('name'), handler=json_data.get('handler'))


class ControlSource(object):
    def __init__(self, **kwargs):
        self.manifest_type = kwargs.get('manifest_type')
        self.label = kwargs.get('label')
Example #12
# Imports below are inferred from usage in this snippet; the original header is not shown.
import math
import random

import numpy as np
import theano
import theano.tensor as T

from utility import enum


class Model(object):

    Modes = enum(ONLINE=1, OFFLINE=2)

    # Data split by percentage for
    # [training, validation, testing]
    Split = [0.8, 0.10, 0.05]
    TrainMax = 1024 * 5
    ValidMax = 1024 * 2
    BestValidationInterval = 4
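
    # For example, with n_data = 10000 the raw split in setTrainingData gives
    # floor(10000 * 0.8) = 8000 training, floor(10000 * 0.10) = 1000 validation
    # and floor(10000 * 0.05) = 500 test samples, before TrainMax and ValidMax
    # are applied in that method.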

    def __init__(
        self,
        rng,  # random number generator used to initialize weights
        input,  # a theano.tensor.dmatrix of shape (n_examples, n_in)
        batch_size,  # size of mini batch
        patch_size,  # size of feature map
        train_time,  # batch training time before resampling
        path,  # where to load or save best model
        type,  # type of model (CNN or MLP)
        id  # the unique identifier of the model (used for persistence)
    ):

        self.done = False
        self.id = id
        self.type = type
        self.path = path
        self.train_time = train_time
        self.batchSize = batch_size
        self.patchSize = patch_size
        self.input = input
        self.rng = rng
        self.params = []
        self.best_loss = np.inf
        self.i_train = []
        self.i_valid = []
        self.i_test = []
        self.i = []
        self.n_data = 0
        self.iteration = 0
        self.initialized = False
        self.x = input
        self.index = T.lscalar()
        self.label_dist = []

    def setTrainingData(
            self,
            x,  #
            y,  #
            p,  #
            l,  #
            learning_rate,  #
            momentum):
        # save the training set and params
        self.x_data = x
        self.y_data = y
        self.p_data = p
        self.i_labels = l
        self.n_data = len(y)
        self.learning_rate = learning_rate
        self.momentum = momentum
        self.iteration = 0

        # split the training set into training, validation, and test
        self.n_train = int(math.floor(self.n_data * Model.Split[0]))
        self.n_valid = int(math.floor(self.n_data * Model.Split[1]))
        self.n_test = int(math.floor(self.n_data * Model.Split[2]))
        self.n_train = min(self.n_train, Model.TrainMax)

        self.n_valid = Model.ValidMax
        self.n_train = self.n_data - self.n_valid

        self.i = np.arange(self.n_data)
        self.i = np.random.choice(self.i, self.n_data, replace=False)

        self.i_valid = self.i[self.n_train:self.n_train + self.n_valid]
        self.i_train = []
        self.l_draws = []
        l_percentages = [float(len(l_data)) / self.n_data for l_data in l]
        self.n_draws = [int(self.n_train * l) for l in l_percentages]

        total = np.sum(self.n_draws)
        if (total < self.n_train):
            i_max = np.argmax(self.n_draws)
            self.n_draws[i_max] += (self.n_train - total)

        self.i_train_labels = [[] for l in l_percentages]
        self.i_valid_labels = [[] for l in l_percentages]

        #self.l_dist        = [ len(l_data) for l_data in l]
        '''
		print 'n_valid:', self.n_valid
		print '#valid:', len(self.i_valid)	
 		print '==setTrainingData'	
		print 'p_draws:',l_percentages
		print 'n_draws:', self.n_draws
		print 'total:', np.sum(self.n_draws)
		'''

    def drawRandomizedSamples(self):
        print '------randomized sampling-------'
        self.i_train = []
        for i, n_draw in enumerate(self.n_draws):
            self.i_labels[i] = np.hstack(
                (self.i_labels[i], self.i_train_labels[i]))
            n_ldata = len(self.i_labels[i])
            self.i_labels[i] = np.random.choice(self.i_labels[i],
                                                n_ldata,
                                                replace=False)
            self.i_train_labels[i] = self.i_labels[i][:n_draw]
            self.i_labels[i] = self.i_labels[i][n_draw:]
            self.i_train = np.concatenate(
                (self.i_train, self.i_train_labels[i]))

    def drawStratifiedSamples(self):
        print '------stratified sampling-------'
        n_train = len(self.i_train)
        self.i_train = []
        print 'n_train:', n_train
        for i, n_draw in enumerate(self.n_draws):
            if n_train > 0:
                # determine number of good and bad
                # samples based on the training results
                indices = self.i_train_labels[i]
                n_indices = len(indices)
                p_train = self.p_data[indices]
                i_sorted = np.argsort(p_train, axis=0)
                n_good = len(np.where(p_train[i_sorted] == 0)[0])
                n_bad = len(indices) - n_good

                # keep at most 50% of the bad samples for
                # retraining, and resample the rest
                n_good = max(n_good, n_draw / 2)
                n_bad = n_indices - n_good
                i_good = i_sorted[:n_good]
                i_bad = i_sorted[n_good:]

                print '-------', i, '---------'
                print 'n_indices:', n_indices
                print 'n_good:', n_good
                print 'n_bad:', n_bad
                print 'i_sorted:', i_sorted
                print 'indices:', indices
                print 'p_train:', len(p_train), p_train
                print 'i_sorted:', len(i_sorted), i_sorted

                # return the good indices back to the pool
                self.i_labels[i] = np.hstack((self.i_labels[i], i_good))

                # draw replacement samples for the good indices
                i_new = self.i_labels[i][:n_good]
                self.i_labels[i] = self.i_labels[i][n_good:]

                # combine with the bad indices to comprise the new
                # training batch
                self.i_train_labels[i] = np.hstack((i_new, i_bad))
            else:
                self.i_train_labels[i] = self.i_labels[i][:n_draw]
                self.i_labels[i] = self.i_labels[i][n_draw:]

            # add the label indices to the training batch
            self.i_train = np.concatenate(
                (self.i_train, self.i_train_labels[i]))

    def rotateSamples(self):
        print 'rotateSamples....'
        # randomly rotate samples
        n_half = len(self.i_train) / 2
        indices = np.random.choice(self.i_train, n_half, replace=False)

        for index in indices:
            #print 'itrain:', self.i_train
            #print 'index:', index
            patch = self.x_data[index]
            #print 'patch.a:',patch
            patch = np.reshape(patch, (self.patchSize, self.patchSize))
            # randomly flip the patch either horizontally or vertically
            if random.random() < 0.5:
                patch = np.fliplr(patch)
            else:
                patch = np.flipud(patch)
            patch = patch.flatten()
            #print 'patch.b:',patch
            self.x_data[index] = patch
            #exit(1)

    def justDraw(self):
        print 'just draw...'
        i = self.i[:self.n_train]
        self.i_train = np.random.choice(i, self.n_train, replace=False)

        if (self.iteration % 2 == 0):
            self.rotateSamples()

    def sampleTrainingData(self):

        self.iteration += 1
        '''	
                if self.iteration == Model.BestValidationInterval:
                        self.drawRandomizedSamples()
                else:
                        self.drawStratifiedSamples()
			#self.rotateSamples()
		'''

        self.justDraw()

        self.i_train = self.i_train.astype(dtype=int)

        lens = [len(l) for l in self.i_labels]
        print 'remaining sizes:', lens
        lens = [len(l) for l in self.i_train_labels]
        print 'sampled sizes:', lens
        print 'train indices:', len(self.i_train)
        print self.i_train

        #t = self.i[:self.n_train]
        #self.i_train = np.random.choice( t, self.n_train, replace=False)
        #self.i_valid = np.random.choice( self.i, self.n_valid, replace=False)

        train_x = self.x_data[self.i_train]
        train_y = self.y_data[self.i_train]
        valid_x = self.x_data[self.i_valid]
        valid_y = self.y_data[self.i_valid]
        test_x = self.x_data[self.i_test]
        test_y = self.y_data[self.i_test]

        print 'tx:', np.shape(train_x)
        print 'ty:', np.shape(train_y)
        print 'vx:', np.shape(valid_x)
        print 'vy:', np.shape(valid_y)

        if (self.iteration == Model.BestValidationInterval):
            self.best_loss = np.inf
            self.iteration = 0

        if self.initialized:
            self.lr_shared.set_value(np.float32(self.learning_rate))
            self.m_shared.set_value(np.float32(self.momentum))

            self.train_x.set_value(np.float32(train_x))
            self.valid_x.set_value(np.float32(valid_x))
            self.test_x.set_value(np.float32(test_x))

            self.train_y.owner.inputs[0].set_value(np.int32(train_y))
            self.valid_y.owner.inputs[0].set_value(np.int32(valid_y))
            self.test_y.owner.inputs[0].set_value(np.int32(test_y))
        else:
            self.y = T.ivector(
                'y')  # the labels are presented as 1D vector of [int] labels
            self.lr = T.scalar('learning_rate')
            self.m = T.scalar('momentum')

            self.lr_shared = theano.shared(np.float32(self.learning_rate))
            self.m_shared = theano.shared(np.float32(self.momentum))

            self.train_x = theano.shared(train_x, borrow=True)
            self.valid_x = theano.shared(valid_x, borrow=True)
            self.test_x = theano.shared(test_x, borrow=True)

            self.train_y = theano.shared(train_y, borrow=True)
            self.valid_y = theano.shared(valid_y, borrow=True)
            self.test_y = theano.shared(test_y, borrow=True)

            self.train_y = T.cast(self.train_y, 'int32')
            self.valid_y = T.cast(self.valid_y, 'int32')
            self.test_y = T.cast(self.test_y, 'int32')
            self.initialized = True

    def sampleTrainingDataold(self):

        # iteration counter
        self.iteration += 1

        if self.iteration == Model.BestValidationInterval:
            self.drawRandomizedSamples()
        else:
            self.drawStratifiedSamples()

        if len(self.i_train) > 0:
            print 'second....'
            self.i_train = []
            if self.iteration == Model.BestValidationInterval:
                self.drawRandomizedSamples()
            else:
                self.drawStratifiedSamples()

            for i, n_draw in enumerate(self.n_draws):
                indices = self.i_train_labels[i]
                p_train = self.p_data[indices]
                i_sorted = np.argsort(p_train, axis=0)
                n_good = len(np.where(p_train[i_sorted] == 0)[0])
                n_bad = len(indices) - n_good
                #n_good   = max( n_good, n_draw/2)
                #i_good   = i_sorted[ : n_good ]
                #i_bad    = i_sorted[ n_good: ]

                n_threshold = int(len(indices) * 0.30)

                print 'n_good:', n_good
                print 'n_bad:', n_bad
                print 'min:', n_threshold
                # if not enough bad samples, just pick random samples
                if self.iteration == Model.BestValidationInterval:
                    print '--->random sampling<----'
                    self.i_labels[i] = np.hstack(
                        (self.i_labels[i], self.i_train_labels[i]))
                    n_ldata = len(self.i_labels[i])
                    print 'n_ldata:', n_ldata
                    self.i_labels[i] = np.random.choice(self.i_labels[i],
                                                        n_ldata,
                                                        replace=False)
                    self.i_train_labels[i] = self.i_labels[i][:n_draw]
                    self.i_labels[i] = self.i_labels[i][n_draw:]
                    self.i_train = np.concatenate(
                        (self.i_train, self.i_train_labels[i]))
                    self.drawRandomizedSamples()

                else:
                    self.drawStratifiedSamples()
                    print '--->5050 sampling<-----'
                    # keep 50% of the bad samples and replace the rest
                    n_good = max(n_good, n_draw / 2)
                    i_good = i_sorted[:n_good]
                    i_bad = i_sorted[n_good:]

                    self.i_labels[i] = np.hstack((self.i_labels[i], i_good))
                    i_new = self.i_labels[i][:n_good]
                    self.i_train_labels[i] = np.hstack((i_new, i_bad))
                    self.i_train = np.concatenate(
                        (self.i_train, self.i_train_labels[i]))
        else:
            #self.drawStratifiedSamples()
            self.i_train = []
            self.i_train_labels = [[] for l in self.n_draws]

            for i, n_draw in enumerate(self.n_draws):
                self.i_train_labels[i] = self.i_labels[i][:n_draw]
                self.i_labels[i] = self.i_labels[i][n_draw:]
                self.i_train = np.concatenate(
                    (self.i_train, self.i_train_labels[i]))

                #i_draw = np.random.choice( self.i_labels[i], n_draw, replace=False )
                print i, n_draw, len(self.i_train_labels[i]), len(
                    self.i_labels[i])

        self.i_train = self.i_train.astype(dtype=int)
        lens = [len(l) for l in self.i_labels]
        print 'remaining sizes:', lens
        lens = [len(l) for l in self.i_train_labels]
        print 'sampled sizes:', lens
        print 'train indices:', len(self.i_train)
        print self.i_train
        #self.i       = self.i[ self.n_train: ]
        #self.i_train = self.i[ 0 : self.n_train ]
        #self.i       = self.i[ self.n_train: ]

        train_x = self.x_data[self.i_train]
        train_y = self.y_data[self.i_train]
        valid_x = self.x_data[self.i_valid]
        valid_y = self.y_data[self.i_valid]
        test_x = self.x_data[self.i_test]
        test_y = self.y_data[self.i_test]

        print 'tx:', np.shape(train_x)
        print 'ty:', np.shape(train_y)
        print 'vx:', np.shape(valid_x)
        print 'vy:', np.shape(valid_y)

        exit(1)

        if (self.iteration == Model.BestValidationInterval):
            self.best_loss = np.inf
            self.iteration = 0

        if self.initialized:
            print 'exiting here....'

            self.lr_shared.set_value(np.float32(self.learning_rate))
            self.m_shared.set_value(np.float32(self.momentum))

            self.train_x.set_value(np.float32(train_x))
            self.valid_x.set_value(np.float32(valid_x))
            self.test_x.set_value(np.float32(test_x))

            self.train_y.owner.inputs[0].set_value(np.int32(train_y))
            self.valid_y.owner.inputs[0].set_value(np.int32(valid_y))
            self.test_y.owner.inputs[0].set_value(np.int32(test_y))
        else:

            # allocate symbolic variables for the data
            #self.index = T.lscalar()     # index to a [mini]batch
            #self.x     = T.matrix('x')   # the data is presented as rasterized images
            #self.x     = self.input
            self.y = T.ivector(
                'y')  # the labels are presented as 1D vector of [int] labels
            self.lr = T.scalar('learning_rate')
            self.m = T.scalar('momentum')

            self.lr_shared = theano.shared(np.float32(self.learning_rate))
            self.m_shared = theano.shared(np.float32(self.momentum))

            self.train_x = theano.shared(train_x, borrow=True)
            self.valid_x = theano.shared(valid_x, borrow=True)
            self.test_x = theano.shared(test_x, borrow=True)

            self.train_y = theano.shared(train_y, borrow=True)
            self.valid_y = theano.shared(valid_y, borrow=True)
            self.test_y = theano.shared(test_y, borrow=True)

            self.train_y = T.cast(self.train_y, 'int32')
            self.valid_y = T.cast(self.valid_y, 'int32')
            self.test_y = T.cast(self.test_y, 'int32')
            self.initialized = True

    def setupSegmentation(self):

        self.initialized = True

    def train(self):
        pass

    def classify(self, image):
        pass

    def predict(self, image):
        pass

    def save(self):
        print 'saving model...'
        pass

    def load(self):
        pass

    def reportTrainingStats(self,
                            elapsedTime,
                            batchIndex,
                            valLoss,
                            trainCost,
                            mode=0):
        DB.storeTrainingStats(self.id, valLoss, trainCost, mode=mode)
        msg = '(%0.1f)     %i     %f%%'%\
        (
           elapsedTime,
           batchIndex,
           valLoss
        )
        status = '[%f]' % (trainCost)
        Utility.report_status(msg, status)
Example #13
from utility import enum

# A value of -1 means we don't have the information.
# For booleans, we use an enum that follows the convention: -1 is unknown, 0 is False, 1 is True.

TEAM_SIDE = enum(UNKNOWN=-1, LEFT_BOT=0, RIGHT_TOP=1)
SBIRE_TYPE = enum(UNKNOWN=-1, MELEE=0, RANGE=1, GIB=2, HUGE=3)
COST_TYPE = enum(UNKNOWN=-1, MANA=0, ENERGY=1, HEALTH=2, NOTHING=3)
CHAMPION = enum(UNKNOWN=-1, VEIGAR=0, TRISTANA=1, AMUMU=2, LULU=3, POPPY=4) # because yordles are op...
BOOL = enum(UNKNOWN=-1, FALSE=0, TRUE=1)
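

# Hypothetical helper (not in the original file) showing the convention in use:
def to_bool_enum(value):
    # None means "we don't have the information"; anything else maps to TRUE/FALSE.
    if value is None:
        return BOOL.UNKNOWN
    return BOOL.TRUE if value else BOOL.FALSE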