def main():
    cfg = Config()
    TVT, TMO = set_devices(cfg.sys_device_ids)
    data_loader = get_data_loader(cfg)
    spec_loss = SpectralCLusterLayer()
    model = Model(cfg.vector_size, cfg.fix_weight)
    model_w = DataParallel(model)
    optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=cfg.lr)
    modules_optims = [model, optimizer]
    TMO(modules_optims)

    may_set_mode(modules_optims, 'train')
    for epoch in range(cfg.total_epoch):
        epoch_done = False
        step = 0
        while not epoch_done:
            step += 1
            ims, _, labels, epoch_done = data_loader.next_batch()
            ims_var = Variable(TVT(torch.from_numpy(ims).float()))
            batch_size = ims_var.size()[0]
            num_cluster = len(data_loader.ids)
            # one-hot encode the (1-based) identity labels
            labels_matrix = np.zeros([batch_size, num_cluster], dtype=int)
            labels_matrix[range(batch_size), labels - 1] = 1
            labels_var = TVT(torch.from_numpy(labels_matrix).float())
            optimizer.zero_grad()
            feat = model_w(ims_var)
            # backpropagate the gradient computed by the spectral clustering layer
            G = spec_loss.grad_F(feat, labels_var)
            feat.backward(gradient=G)
            optimizer.step()
            objective_value = labels_var.size()[1] - torch.sum(
                torch.mm(spec_loss.pseudo_inverse(labels_var), feat)
                * torch.mm(spec_loss.pseudo_inverse(feat), labels_var).t())
            print("epoch %d --- loss value= %f" % (epoch, objective_value))
    print("Finished")
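The objective printed above equals k - trace(pinv(Y) @ F @ pinv(F) @ Y), which drops to zero when the learned features F span the same subspace as the one-hot label matrix Y. A minimal standalone sketch of that quantity, assuming spec_loss.pseudo_inverse is an ordinary Moore-Penrose pseudo-inverse (torch.pinverse here):

import torch

F = torch.randn(8, 4)            # 8 samples, 4-dimensional features
Y = torch.eye(4).repeat(2, 1)    # one-hot labels for 4 clusters, 2 samples each
k = Y.size(1)
# k - trace(pinv(Y) @ F @ pinv(F) @ Y); exactly 0 when F = Y
objective = k - torch.trace(torch.pinverse(Y) @ F @ torch.pinverse(F) @ Y)
print(objective.item())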
Example #2
def logreg_train(args):
	d = load_file(args.dataset)
	try:
		# Sanitize dataset
		d = d.dropna(subset=['Herbology', 'Ancient Runes', 'Astronomy'])
		X = np.array(d.values[:, [8, 12, 7]], dtype=float)
		y = d.values[:, 1]

		# Init model
		if args.stochastic:
			args.batch = 1
		model = Model(args.iter, args.learning, int(args.batch) > 0, args.batch, args.precision, args.visualizer)
		
		# Normalize features
		X = np.array([normalize(t) for t in X.T]).T
		new_df = pd.DataFrame(X)

		# Convert guild names to integer indexes
		Y = []
		for i in y:
			Y.append(model.feature_i[i])
		y = np.array(Y, dtype=int)
		y_unique = np.unique(y)

		# Execute logistic regression
		model.process_logreg(X, y)
	except Exception as e:
		print("error: {0}".format(e))
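The per-column normalize() used above is not shown in this snippet; a minimal sketch, assuming it performs simple min-max scaling of one feature column (a common choice, not confirmed by the source):

import numpy as np

def normalize(column):
    # scale a 1-D feature column to [0, 1]; assumes the column is not constant
    col = np.asarray(column, dtype=float)
    return (col - col.min()) / (col.max() - col.min())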
Example #3
def logreg_predict(args):
    d = load_file(args.dataset)
    v = load_file(args.values)

    try:
        # Sanitize dataset
        d = d.fillna(0)

        # Normalize features
        X = np.array(d.values[:, [8, 12, 7]], dtype=float)
        X = np.array([normalize(t) for t in X.T]).T
        X = np.insert(X, 0, 1, axis=1)

        theta = np.array(v.values[:, 1:].T, dtype=float)

        model = Model()
        prediction = model.hypothesis(theta, X)

        # Convert integer indexes to guild names
        houses = np.argmax(prediction, axis=1)
        matching_houses = list(map(lambda v: model.i_feature[v], houses))

        write_prediction(matching_houses)
        print("houses.csv successfully written!")

        if args.show:
            greek_god_graph(matching_houses, X, model)

    except Exception as e:
        print("error : {0}".format(e))
        sys.exit(-1)
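model.hypothesis() is not shown in this snippet; for one-vs-all logistic regression it is typically the sigmoid of X @ theta.T, yielding one probability per house for each row. A minimal sketch under that assumption (shapes inferred from how prediction is used above):

import numpy as np

def hypothesis(theta, X):
    # theta: (n_classes, n_features + 1), X: (n_samples, n_features + 1) with a bias column;
    # returns an (n_samples, n_classes) matrix of class probabilities
    return 1.0 / (1.0 + np.exp(-X @ theta.T))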
Example #4
def getting_model(self, args):
    print('getting model...')
    if args.train or args.test:
        self.model = Model(batch_size=self.batch_size, val_size=self.val_size,
                           max_len=self.max_len, args=args, dictionary=self.dictionary)
        self.model.compile()
        if (args.train and args.model_restore) or args.test:
            self.model.restore(mode=self.mode)
Example #5
def init_db_models():
    with open(INIT_MODEL_DATA, 'r') as csvfile:
        model_list = csv.reader(csvfile, delimiter=',', quotechar='"')
        next(model_list, None)  # Skip header line
        for row in model_list:
            # row columns (inferred from the indices used here): model id, brand id, model name
            brand = Brand.query.get(row[1])
            model = Model(id=row[0], name=row[2], brand=brand)
            db.session.add(model)
    db.session.commit()
Example #6
def updateModel(model_id):
    if not request.json or 'name' not in request.json or 'id' not in request.json:
        return jsonify({"result": False, "msg": "Failed to Update Model!"})

    # note: 'dataset' is read from the payload here but is not validated above
    model = Model(request.json['name'], request.json['dataset'])

    result = Model.updateModel(model, mysql)

    if result is True:
        return jsonify({"result": True, "msg": "Successfully Updated Model!"})

    return jsonify({"result": False, "msg": "Failed to Update Model!"})
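A hedged sketch of how this endpoint might be called; the HTTP method, route path, and field values are placeholders (only the field names come from the checks and constructor above):

import requests

payload = {"id": 1, "name": "example-model", "dataset": "example-dataset"}  # placeholder values
# the actual route and method are not shown in this snippet; PUT /model/<id> is only illustrative
response = requests.put("http://localhost:5000/model/1", json=payload)
print(response.json())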
Example #7
def processUserResponse(update, context, user_msg):
    REPLY_MARKUP = telegram.ReplyKeyboardRemove()
    chat_id = update.effective_chat.id

    # Initiate new user object
    if chat_id not in ACTIVE_USERS:
        ACTIVE_USERS[chat_id] = User(chat_id, update.effective_chat.first_name)

    # Set user language
    if chat_id in ACTIVE_USERS.keys() and user_msg in config.sections() \
            and ACTIVE_USERS[chat_id].getLang() == "DEFAULT":
        ACTIVE_USERS[chat_id].setLang(user_msg)

    # Get the user's chat language
    user_lang = ACTIVE_USERS[chat_id].getLang()

    if user_lang == "DEFAULT" and len(config.sections()) > 1:
        print(config.sections())
        MESSAGE = config['DEFAULT']['LANG_MESSAGE']
        REPLY_MARKUP = lang_reply_markup
    # If the user questionnaire is already in progress, process the user response:
    elif chat_id in ACTIVE_USERS.keys() and ACTIVE_USERS[chat_id].isModel():
        MESSAGE = ACTIVE_USERS[chat_id].getModel().processQuestion(user_msg)
        REPLY_MARKUP = ACTIVE_USERS[chat_id].getModel().getMarkup()
        # If this is the last question in the questionnaire:
        if ACTIVE_USERS[chat_id].getModel().getStatus() == 0:
            saveAnswers(update, context,
                        ACTIVE_USERS[chat_id].getModel().getAnswers())
            ACTIVE_USERS[chat_id].setModel("NA")
            MESSAGE += config[user_lang]['BYE_MESSAGE']
    # If the user is in the list but has not started a questionnaire:
    # initialize the questionnaire
    elif chat_id in ACTIVE_USERS.keys() and \
            user_msg in config[user_lang]['categories'].split(","):
        ACTIVE_USERS[chat_id].setModel(
            Model(model_name=config[user_lang]['models'].split(",")[
                config[user_lang]['categories'].split(",").index(user_msg)],
                  user_lang=user_lang))
        MESSAGE = ACTIVE_USERS[chat_id].getModel().processQuestion(user_msg)

    # Init new user
    # Show greeting message one more time
    else:
        REPLY_MARKUP = start_reply_markup(user_lang)
        MESSAGE = config[user_lang]['GREETING_WORD'] + " " \
                  + ACTIVE_USERS[chat_id].getName() + "! " + config[user_lang]['WELCOME_MESSAGE']

    context.bot.send_message(chat_id=chat_id,
                             text=MESSAGE,
                             parse_mode=telegram.ParseMode.HTML,
                             reply_markup=REPLY_MARKUP)
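A hedged sketch of how a handler like this is typically wired up with the python-telegram-bot v13-style API used above (ParseMode, ReplyKeyboardRemove); the wrapper that extracts user_msg and the bot token are assumptions, not shown in the source:

from telegram.ext import Updater, MessageHandler, Filters

def on_text(update, context):
    # pass the raw message text through to the questionnaire logic above
    processUserResponse(update, context, update.message.text)

updater = Updater(token="YOUR_BOT_TOKEN")  # placeholder token
updater.dispatcher.add_handler(MessageHandler(Filters.text & ~Filters.command, on_text))
updater.start_polling()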
Example #8
def get_model(input_channels,
              input_time_length,
              dilations=None,
              kernel_sizes=None,
              padding=False):
    """
    initializes a new Deep4Net and changes the kernel sizes and dilations of the network based on the input parameters
    :param input_channels: 1 axis input shape
    :param input_time_length: 0 axis input shape
    :param dilations: dilations of the max-pool layers of the network
    :param kernel_sizes: kernel sizes of the max-pool layers of the network
    :param padding: if padding is to be added

    :return: a Model object, the changed Deep4Net based on the kernel sizes and dilation parameters and the name
    of the model based on the kernel sizes and dilatiosn
    """
    if kernel_sizes is None:
        kernel_sizes = [3, 3, 3, 3]
    print('SBP False!!!')
    model = Model(input_channels=input_channels,
                  n_classes=1,
                  input_time_length=input_time_length,
                  final_conv_length=2,
                  stride_before_pool=False)
    model.make_regressor()
    if cuda:
        model.model = model.model.cuda()

    model_name = get_model_name_from_kernel_and_dilation(
        kernel_sizes, dilations)

    changed_model = change_network_kernel_and_dilation(model.model,
                                                       kernel_sizes,
                                                       dilations,
                                                       remove_maxpool=False)
    # print(changed_model)

    return model, changed_model, model_name
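A hedged usage sketch of the function above; the channel count, trial length, and dilation values here are placeholders, not taken from the source:

model, changed_model, model_name = get_model(
    input_channels=64,            # placeholder channel count
    input_time_length=1125,       # placeholder number of time samples per trial
    dilations=[1, 2, 4, 8],       # placeholder max-pool dilations
    kernel_sizes=[3, 3, 3, 3],
)
print(model_name)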
Example #9
def factory_model():
    # creates a Model only while Model.counter == 0 (presumably incremented inside Model);
    # otherwise the function falls through and implicitly returns None
    if Model.counter == 0:
        return Model()
Example #10
def initialize_model(global_path, image_size, image_format, config, loss_type):
    model = None
    torch.cuda.empty_cache()
    gc.collect()

    epochs, lr, leaky_thresh, lamda, beta1, beta2 = get_model_params(config)

    if loss_type == 'hybrid_l1':
        model = Hybrid_L1_Model(base_path=global_path,
                                image_size=image_size,
                                image_format=image_format,
                                epochs=epochs,
                                learning_rate=lr,
                                leaky_relu=leaky_thresh,
                                lamda=lamda,
                                betas=(beta1, beta2))
    elif loss_type == 'hybrid_l2':
        model = Hybrid_L2_Model(base_path=global_path,
                                image_size=image_size,
                                image_format=image_format,
                                epochs=epochs,
                                learning_rate=lr,
                                leaky_relu=leaky_thresh,
                                lamda=lamda,
                                betas=(beta1, beta2))
    elif loss_type == 'l1':
        model = L1_Model(base_path=global_path,
                         image_size=image_size,
                         image_format=image_format,
                         epochs=epochs,
                         learning_rate=lr,
                         leaky_relu=leaky_thresh,
                         lamda=lamda,
                         betas=(beta1, beta2))
    elif loss_type == 'l2':
        model = L2_Model(base_path=global_path,
                         image_size=image_size,
                         image_format=image_format,
                         epochs=epochs,
                         learning_rate=lr,
                         leaky_relu=leaky_thresh,
                         lamda=lamda,
                         betas=(beta1, beta2))
    elif loss_type == 'perpetual':
        # 'perpetual' likely means a perceptual loss; spelling kept to match the class name
        model = Perpetual_Model(base_path=global_path,
                                image_size=image_size,
                                image_format=image_format,
                                epochs=epochs,
                                learning_rate=lr,
                                leaky_relu=leaky_thresh,
                                lamda=lamda,
                                betas=(beta1, beta2))
    elif loss_type == 'default':
        model = Model(base_path=global_path,
                      image_size=image_size,
                      image_format=image_format,
                      epochs=epochs,
                      learning_rate=lr,
                      leaky_relu=leaky_thresh,
                      lamda=lamda,
                      betas=(beta1, beta2))
    else:
        raise NotImplementedError(
            'This Loss function has not been implemented!')

    average_loss = AverageLoss(os.path.join(global_path, 'Loss_Checkpoints'))

    return model, average_loss
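The if/elif chain above could equivalently be written as a dictionary dispatch, since every branch passes the same keyword arguments; a behavior-preserving sketch that reuses the same model classes and helpers as the function above (illustrative only, not from the source):

LOSS_MODELS = {
    'hybrid_l1': Hybrid_L1_Model,
    'hybrid_l2': Hybrid_L2_Model,
    'l1': L1_Model,
    'l2': L2_Model,
    'perpetual': Perpetual_Model,
    'default': Model,
}

def initialize_model_compact(global_path, image_size, image_format, config, loss_type):
    torch.cuda.empty_cache()
    gc.collect()
    epochs, lr, leaky_thresh, lamda, beta1, beta2 = get_model_params(config)
    if loss_type not in LOSS_MODELS:
        raise NotImplementedError('This Loss function has not been implemented!')
    # look up the model class and construct it with the shared keyword arguments
    model = LOSS_MODELS[loss_type](base_path=global_path,
                                   image_size=image_size,
                                   image_format=image_format,
                                   epochs=epochs,
                                   learning_rate=lr,
                                   leaky_relu=leaky_thresh,
                                   lamda=lamda,
                                   betas=(beta1, beta2))
    average_loss = AverageLoss(os.path.join(global_path, 'Loss_Checkpoints'))
    return model, average_loss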
Example #11
import sys
from utils import readjson
from models.SimpleRergression import Linear
from utils.DataLoader import DataLoader
from models.Model import Model

if __name__ == '__main__':
    config = readjson(sys.argv[1])
    linear = Linear(**config['linear'])
    dataloader = DataLoader(**config['dataloader'])
    modal = Model(linear, dataloader, **config['modal'])
    modal.fit()
Example #12
def __init__(self):
    self._close_funcs = []
    self.model = Model()
    self.fig_widget = None
    self.main_view = MainView(self)
    self.main_view.show()
Example #13
# creates the Flask RESTful API application
app = Flask(__name__, template_folder='views')
api = Api(app)

# configures the MongoDB connection string
# app.config["MONGO_URI"] = "mongodb://{}:{}@{}:{}/{}".format(cfg.username,
#                                                             cfg.password,
#                                                             cfg.host,
#                                                             cfg.port,
#                                                             cfg.dbname)

app.config["MONGO_URI"] = "mongodb://{}:{}/{}".format(cfg.host, cfg.port,
                                                      cfg.dbname)

# creates the model
model = Model(PyMongo(app))

# registers the API routes and controllers
api.add_resource(ControllerHome, '/', resource_class_kwargs={'model': model})

api.add_resource(ControllerSats,
                 '/sats',
                 resource_class_kwargs={
                     'model': model,
                     'publicKey': secretKey['public']
                 })

api.add_resource(ControllerToken,
                 '/token/<int:id>/<int:minutes>/<token>',
                 resource_class_kwargs={
                     'model': model,
Example #14
# Class for running tests of each class

from models.Model import Model
from NiceBoatUtils import RaceUtil
import boatticket as bt
import numpy as np

ml = Model()
pred = ml.predict(0)
print("predict race time")
print(pred)
print("win rate")
win_rate = RaceUtil.win_rate(pred)
print(win_rate)
ex = bt.exacta.Exacta(win_rate)
print("2連単")  # exacta
print(ex.predict())

qu = bt.quinella.Quinella(win_rate)
print("2連複")  # quinella
print(qu.predict())

tri = bt.trio.Trio(win_rate)
print("3連複")  # trio
print(tri.predict())
Example #15
    """
    Tested on python version : 3.5.2
    """

    # parameters
    params = {
        "max_epoch": 60,
        "learning_rate": 0.001,
        "batch_size": 8,
        "post_padding_size": 10,
        "comment_padding_size": 20,
        "n_hidden": 100,
        "num_filters": 150,
        "filter_sizes": [3, 4, 5],
        "keep_prob_global_train": 0.6,
        "bidirectional": False,
        "binary_sentiment": False,
        "display_step": 100,
        "evaluate_every": 1,
        # Constants
        "word2vec_dim": 300,
        "n_classes_topics": len(Resources.topics()),
        "n_classes_emotion": len(Resources.emotions()),
        "n_classes_speech_acts": len(Resources.speech_acts()),
    }
    params["n_classes_sentiment"] = (len(Resources.binary_sentiment())
                                     if params["binary_sentiment"] else len(Resources.sentiment()))

    model = Model(params)
    model.start()
Example #16
    def __init__(self, sys_argv):
        super(App, self).__init__(sys_argv)
        self.model = Model()
        # listView = QtGui.QListView()
        # listView.show
        red = QtGui.QColor(255, 0, 0)
        green = QtGui.QColor(0, 255, 0)
        blue = QtGui.QColor(0, 0, 255)
        rowCount = 4
        columnCount = 2
        tableData1 = [[QtGui.QColor("#FFFF00") for i in range(columnCount)]
                      for j in range(rowCount)]
        headers = ["Pallet0", "Colors"]

        entity = json2obj(
            '{"category":"groups","path":"/mnt/x19/mavisdev/projects/geotest/sequence/afg_0025","name":"afg_0025","description":"AFG_0025 sequence","fileImportPath":"","isGlobal":false,"project":"geotest","fields":{"priority":"medium","status":"idle"},"createdBy":"trevor","createdAt":"2016-09-13T20:28:04.745Z","updatedAt":"2017-05-31T21:38:19.935Z","id":"57d861546fef3a0001c87954","type":"sequence","mediaIds":[],"isTest":false}'
        )
        entity1 = json2obj(
            '{"category":"assets","path":"/mnt/x19/mavisdev/projects/geotest/globals/assets/wood_log","name":"wood_log","description":"a log that is wooden","fileImportPath":"","isGlobal":false,"project":"geotest","fields":{"priority":"medium","status":"review","grouping":"char","comp_status":"Ready","prod_status":"HIGH"},"createdBy":"dexplorer","createdAt":"2017-06-12T20:07:21.739Z","updatedAt":"2017-06-12T20:07:21.798Z","id":"593ef47973d9f40001cf898b","type":"assets","mediaIds":[],"isTest":false}'
        )
        entity2 = json2obj(
            '{"category":"assets","path":"/mnt/x19/mavisdev/projects/geotest/sequence/afg_0025/shots/afg_0025_0020/plates/plate_afg-0025__0020","name":"plate_afg-0025__0020","description":"plate asse for afg_0025_0020","latest":"583dc9eebc843d0001905bde","fileImportPath":"/mnt/x1/mavisdev/client_imports/geotest/afg_0025_0020/AFG_0025_0020_bg01_v001_LIN.exr","isGlobal":true,"project":"geotest","fields":{"priority":"low","status":"approved","startFrame":10,"endFrame":100,"pxAspect":1,"colorspace":"linear","fileType":"exr","width":1920,"height":1080,"lut":"","ccc":"","head":8,"tail":8,"handle":8},"createdBy":"trevor","createdAt":"2016-11-29T18:31:59.429Z","updatedAt":"2017-05-23T21:17:43.390Z","id":"583dc99fbc843d0001905bd9","type":"plates","mediaIds":[],"parentId":"57d861546fef3a0001c87960","isTest":false}'
        )
        entity3 = json2obj(
            '{"category":"tasks","path":"/mnt/x19/mavisdev/projects/geotest/globals/assets/wood_log/texture/tex_log","name":"tex_log","description":"texture the wood log","latest":"5941b18073d9f40001cf8a6c","fileImportPath":"","isGlobal":false,"project":"geotest","fields":{"priority":"urgent","status":"revised","grouping":"mtpg","comp_status":"In-Progress","prod_status":"HIGH"},"createdBy":"dexplorer","createdAt":"2017-06-12T20:08:10.814Z","updatedAt":"2017-06-14T21:58:24.772Z","id":"593ef4aa73d9f40001cf8992","type":"texture","mediaIds":[],"isTest":false}'
        )
        entity4 = json2obj(
            '{"category":"tasks","path":"/mnt/x19/mavisdev/projects/geotest/sequence/mdm_0202/shots/mdm_0202_0100/assets/tuktuka/model/tuktuk_model","name":"tuktuk_model","description":"published plate 6310","latest":"58c6ffe6e925cc00016a6b58","fileImportPath":"","isGlobal":false,"project":"geotest","fields":{"priority":"high","status":"revised","grouping":"vehi","comp_status":"Waiting","prod_status":"MEDIUM"},"createdBy":"trevor","createdAt":"2017-04-13T22:08:33.983Z","updatedAt":"2017-04-18T20:35:28.557Z","id":"589b4f9dc599d10001375de9","type":"model","mediaIds":[],"parentId":"589b4f10c599d10001375de2","isTest":false}'
        )

        rootNode = Node('Hips')
        childNode0 = TransformNode('LeftPirateleg', entity, rootNode)
        childNode1 = Node('RightLeg', entity1, rootNode)
        childNode2 = Node('RightFoot', entity2, childNode1)
        childNode3 = CameraNode('Xxxree', entity3, rootNode)
        childNode4 = LightNode('kldjskfds', entity4, childNode1)

        tree = TreeModel(rootNode)

        model2 = PaletteTableModel(tableData1, headers)
        self.main_ctrl = MainController(self.model)
        self.main_view = MainView(model=self.model, main_ctrl=self.main_ctrl)
        self.main_view.test(model2, tree=tree)
        self.main_view.show()

        # model2.insertRows(0, 5)
        # model2.insertColumns(0, 5)
        model2.removeColumns(1, 1)
        # tree.insertRows(0, 1)
        #
        #
        # self.threadClass = ThreadClass()
        # self.connect(self.threadClass, QtCore.SIGNAL('CPU_VALUE'), self.done)
        # self.threadClass.start()
        self.manager = QtNetwork.QNetworkAccessManager()
        self.manager.finished.connect(self.reply_finished)
        print(
            QtNetwork.QNetworkSession(QtNetwork.QNetworkConfigurationManager().
                                      defaultConfiguration()).State())
        self.request = QtNetwork.QNetworkRequest(
            QtCore.QUrl(
                'http://www.planwallpaper.com/static/images/1080p-HD-Wallpapers-9.jpg'
            ))
        print("Sending request")
        self.manager.get(self.request)
        self.manager2 = QtNetwork.QNetworkAccessManager()
        self.manager2.finished.connect(self.reply_finished)
        print(
            QtNetwork.QNetworkSession(QtNetwork.QNetworkConfigurationManager().
                                      defaultConfiguration()).State())
        self.request = QtNetwork.QNetworkRequest(
            QtCore.QUrl('http://lorempixel.com/1800/1400/city/'))
        print("Sending request")
        self.manager2.get(self.request)

        self.manager3 = QtNetwork.QNetworkAccessManager()
        self.manager3.finished.connect(self.reply_finished)
        print(
            QtNetwork.QNetworkSession(QtNetwork.QNetworkConfigurationManager().
                                      defaultConfiguration()).State())
        self.request = QtNetwork.QNetworkRequest(
            QtCore.QUrl('http://lorempixel.com/1800/1400/city/'))
        print("Sending request")
        self.manager3.get(self.request)