import sys
import os
import PyTorch
import PyTorchHelpers
import numpy as np

Luabit = PyTorchHelpers.load_lua_class('luabit.lua', 'Luabit')

batchSize = 2
numFrames = 4
inSize = 3
outSize = 3
kernelSize = 3

luabit = Luabit('green')
print(luabit.getName())
print('type(luabit)', type(luabit))

inTensor = np.random.randn(batchSize, numFrames, inSize).astype('float32')
luain = PyTorch.asFloatTensor(inTensor)
luaout = luabit.getOut(luain, outSize, kernelSize)
outTensor = luaout.asNumpyTensor()
print('outTensor', outTensor)

res = luabit.printTable(
    {'color': 'red', 'weather': 'sunny', 'anumber': 10, 'afloat': 1.234},
    'mistletoe',
    {'row1': 'col1', 'meta': 'data'})
print('res', res)
def Example2():
    # init
    model_class = PyTorchHelpers.load_lua_class("ModelExample2.lua", 'ModelExample2')
    torch_model = model_class("cuda", 0.001)
    torch_model.build_model((3, 100, 100), 12, 5)
    torch_model.init_model()
    torch_model.show_model()

    # define inputs/labels
    img1 = np.ones((2, 3, 100, 100), dtype=np.float32)
    img1[0, :, :, :] = -0.5
    img1[1, :, :, :] = 0.5
    img2 = np.ones((2, 3, 100, 100), dtype=np.float32)
    img2[0, :, :, :] = 0.5
    img2[1, :, :, :] = -0.5
    label1 = np.ones((2, 2), dtype=np.float32)
    label1[0, :] = 0
    label2 = np.ones((2, 2), dtype=np.float32)
    label2[0, :] = 0
    return torch_model, [img1, img2], [label1, label2]
def extract_featrues(image_dir, list_of_filenames=False, model_path=model_path_default, batch_size=1):
    """
    Extract features for images using a pretrained model.

    @params:
        model_path        : path to the trained model, e.g. models-torch/resnet-18.t7
        image_dir         : directory of images to be processed, e.g. ../data/paralogo
        list_of_filenames : e.g. ['dir/1.jpg', 'dir/2.jpg']
                            Note: pass False (not None) on the Python side to get nil in Lua
        batch_size        : number of images processed per batch
    @return:
        features : a float tensor of image features
    """
    # Load a Lua class from a Lua file
    FeatureExtractor = PyTorchHelpers.load_lua_class('my-extract-features.lua', 'FeatureExtractor')
    # Construct an object of the class
    fe = FeatureExtractor(model_path)
    # Call the feature extraction method
    features = fe.extract(image_dir, list_of_filenames, batch_size)
    return features
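# A hypothetical call sketch (not part of the original source): the argument values
# below simply echo the examples given in the docstring above, and the
# .asNumpyTensor() step assumes the Lua side returns the bridge's FloatTensor wrapper.
if __name__ == '__main__':
    features = extract_featrues(
        '../data/paralogo',
        list_of_filenames=['dir/1.jpg', 'dir/2.jpg'],
        model_path='models-torch/resnet-18.t7',
        batch_size=2)
    print('features shape:', features.asNumpyTensor().shape)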
def dostuff(name):
    MyLuaClass = PyTorchHelpers.load_lua_class('testthreading.lua', 'MyLuaClass')
    print(name, 'dostuff start')
    obj = MyLuaClass(name)
    print('calling run', name)
    obj.run()
    print(name, 'dostuff done')
def test_initThrow():
    ThrowsErrorOnInit = PyTorchHelpers.load_lua_class('test/test_throw.lua', 'ThrowsErrorOnInit')
    noException = True
    try:
        throwsErrorOnInit = ThrowsErrorOnInit()
    except Exception as e:
        noException = False
        print('caught exception', e)
        traceback.print_exc()  # e.printstacktrace()
    assert(not noException)
def test_subthrow():
    """
    check that we get the full stack trace, not just the point of failure
    """
    ThrowsError = PyTorchHelpers.load_lua_class('test/test_throw.lua', 'ThrowsError')
    throwsError = ThrowsError()
    try:
        throwsError.insub_anteater()
    except Exception as e:
        print('error', e)
        assert 'test_throw.lua:18' in str(e)
def test_initThrow():
    ThrowsErrorOnInit = PyTorchHelpers.load_lua_class('test/test_throw.lua', 'ThrowsErrorOnInit')
    noException = True
    try:
        throwsErrorOnInit = ThrowsErrorOnInit()
    except Exception as e:
        noException = False
        print('caught exception', e)
        traceback.print_exc()  # e.printstacktrace()
    assert(not noException)
    print('Note that it\'s normal this throws an exception. It\'s a test of exception throwing :-)')
def run():
    TorchModel = PyTorchHelpers.load_lua_class('torch_model.lua', 'TorchModel')
    torchModel = TorchModel(backend, 28, 10)

    mndata = MNIST('../../data/mnist')
    imagesList, labelsList = mndata.load_training()
    labels = np.array(labelsList, dtype=np.uint8)
    images = np.array(imagesList, dtype=np.float32)
    labels += 1  # since torch/lua labels are 1-based
    N = labels.shape[0]
    print('loaded mnist training data')
    if numTrain > 0:
        N = min(N, numTrain)
    print('numExamples N', N)
    numBatches = N // batchSize
    for epoch in range(numEpochs):
        epochLoss = 0
        epochNumRight = 0
        for b in range(numBatches):
            res = torchModel.trainBatch(
                learningRate,
                images[b * batchSize:(b + 1) * batchSize],
                labels[b * batchSize:(b + 1) * batchSize])
            # print('res', res)
            numRight = res['numRight']
            loss = res['loss']
            epochNumRight += numRight
            epochLoss += loss
            print('epoch ' + str(epoch) + ' batch ' + str(b) +
                  ' accuracy: ' + str(numRight * 100.0 / batchSize) + '%')
        print('epoch ' + str(epoch) + ' accuracy: ' + str(epochNumRight * 100.0 / N) + '%')

    print('finished training')
    print('loading test data...')
    imagesList, labelsList = mndata.load_testing()
    labels = np.array(labelsList, dtype=np.uint8)
    images = np.array(imagesList, dtype=np.float32)
    labels += 1  # since torch/lua labels are 1-based
    N = labels.shape[0]
    print('loaded mnist testing data')
    numBatches = N // batchSize
    epochLoss = 0
    epochNumRight = 0
    for b in range(numBatches):
        predictions = torchModel.predict(
            images[b * batchSize:(b + 1) * batchSize]).asNumpyTensor().reshape(batchSize)
        labelsBatch = labels[b * batchSize:(b + 1) * batchSize]
        numRight = (predictions == labelsBatch).sum()
        epochNumRight += numRight
    print('test results: accuracy: ' + str(epochNumRight * 100.0 / N) + '%')
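# Hypothetical module-level configuration (run() above reads these as globals, but the
# original snippet does not show them); the values here are placeholders only.
backend = 'cpu'      # or 'cuda' / 'cl', depending on how torch_model.lua is written
batchSize = 128
numEpochs = 2
learningRate = 0.02
numTrain = -1        # <= 0 means "use the full training set", per the check in run()

if __name__ == '__main__':
    run()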
def test_initThrow():
    ThrowsErrorOnInit = PyTorchHelpers.load_lua_class('test/test_throw.lua', 'ThrowsErrorOnInit')
    noException = True
    try:
        throwsErrorOnInit = ThrowsErrorOnInit()
    except Exception as e:
        noException = False
        print('caught successfully raised exception', e)
        traceback.print_exc()  # e.printstacktrace()
    assert(not noException)
    print('Note that it\'s normal this throws an exception. It\'s a test of exception throwing :-)')
def test_call_lua():
    TestCallLua = PyTorchHelpers.load_lua_class('test/test_call_lua.lua', 'TestCallLua')

    batchSize = 2
    numFrames = 4
    inSize = 3
    outSize = 3
    kernelSize = 3

    luabit = TestCallLua('green')
    print(luabit.getName())
    assert luabit.getName() == 'green'
    print('type(luabit)', type(luabit))
    assert str(type(luabit)) == '<class \'PyTorchLua.TestCallLua\'>'

    np.random.seed(123)
    inTensor = np.random.randn(batchSize, numFrames, inSize).astype('float32')
    luain = PyTorch.asFloatTensor(inTensor)
    luaout = luabit.getOut(luain, outSize, kernelSize)
    outTensor = luaout.asNumpyTensor()
    print('outTensor', outTensor)
    # I guess we just assume if we got to this point, with no exceptions, then that's a good thing...

    # let's add some new tests...
    outTensor = luabit.addThree(luain).asNumpyTensor()
    assert isinstance(outTensor, np.ndarray)
    assert inTensor.shape == outTensor.shape
    assert np.abs((inTensor + 3) - outTensor).max() < 1e-4

    res = luabit.printTable(
        {'color': 'red', 'weather': 'sunny', 'anumber': 10, 'afloat': 1.234},
        'mistletoe',
        {'row1': 'col1', 'meta': 'data'})
    print('res', res)
    assert res == {'foo': 'bar', 'result': 12.345, 'bear': 'happy'}

    # List and tuple support by conversion to dictionary
    reslist = luabit.modifyList([3.1415, r'~Python\omega', 42])
    restuple = luabit.modifyList((3.1415, r'~Python\omega', 42))
    assert len(reslist) == len(restuple) == 4
    assert list(reslist.keys()) == list(restuple.keys()) == [1, 2, 3, 4]
    assert reslist[1] == restuple[1]
    assert (reslist[1] - 3.1415) < 1e-7
    reslist.pop(1)
    restuple.pop(1)
    assert reslist == restuple == {2: r'~Python\omega', 3: 42, 4: 'Lorem Ipsum'}

    # Get an object created from scratch by Lua
    res = luabit.getList()
    assert res[1] == 3.1415
    res.pop(1)
    assert res == {2: 'Lua', 3: 123}
def test_FunctionThrow():
    ThrowsError = PyTorchHelpers.load_lua_class('test/test_throw.lua', 'ThrowsError')
    throwsError = ThrowsError()
    noException = True
    try:
        throwsError.go()
    except Exception as e:
        noException = False
        print('caught exception', e)
        traceback.print_exc()  # e.printstacktrace()
    assert(not noException)
def test_FunctionThrow():
    ThrowsError = PyTorchHelpers.load_lua_class('test/test_throw.lua', 'ThrowsError')
    throwsError = ThrowsError()
    noException = True
    try:
        throwsError.go()
    except Exception as e:
        noException = False
        print('caught exception', e)
        traceback.print_exc()  # e.printstacktrace()
    assert(not noException)
    print('Note that it\'s normal this throws an exception. It\'s a test of exception throwing :-)')
def config_model(data, dataset):
    dataset_metadata = dataset.metadata
    gpu_device = int(data["gpu_device"])
    learning_rate = float(data["training_param"]["learning_rate"])
    learning_rate_decay = float(data["training_param"]["learning_rate_decay"])
    weight_decay = float(data["training_param"]["weight_decay"])
    input_size = int(data["training_param"]["input_size"])
    linear_size = int(data["training_param"]["linear_size"])
    convo1_size = int(data["training_param"]["convo1_size"])
    convo2_size = int(data["training_param"]["convo2_size"])
    model_finetune = data["model_finetune"]

    model_class = PyTorchHelpers.load_lua_class(data["training_param"]["file"], 'RGBDTracker')
    tracker_model = model_class('cuda', 'adam', gpu_device)
    tracker_model.set_configs({
        "input_size": input_size,
        "linear_size": linear_size,
        "convo1_size": convo1_size,
        "convo2_size": convo2_size,
    })
    if model_finetune == "":
        tracker_model.build_model()
        tracker_model.init_model()
    else:
        tracker_model.load(model_finetune)
    tracker_model.set_configs({
        "learningRate": learning_rate,
        "learningRateDecay": learning_rate_decay,
        "weightDecay": weight_decay,
        # Data needed at test time; the user can retrieve all of it when loading the model and its configs
        "translation_range": float(dataset_metadata["translation_range"]),
        "rotation_range": float(dataset_metadata["rotation_range"]),
        "render_scale": dataset_metadata["object_width"],
        "mean_matrix": dataset.mean,
        "std_matrix": dataset.std
    })
    return tracker_model
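# Hypothetical minimal config for config_model() (not from the original source): the keys
# mirror exactly what the function reads above; the values are placeholders, and the Lua
# file path is borrowed from the rgbd_tracker example elsewhere in this listing.
example_data = {
    "gpu_device": 0,
    "model_finetune": "",
    "training_param": {
        "file": "deeptracking/tracker/rgbd_tracker.lua",
        "learning_rate": 0.001,
        "learning_rate_decay": 0.0,
        "weight_decay": 0.0,
        "input_size": 150,
        "linear_size": 50,
        "convo1_size": 24,
        "convo2_size": 48,
    },
}
# `dataset` must expose .metadata (with translation_range, rotation_range, object_width),
# .mean and .std, so a real dataset object from the project would be passed here:
# tracker_model = config_model(example_data, dataset)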
def test_FunctionThrow():
    ThrowsError = PyTorchHelpers.load_lua_class('test/test_throw.lua', 'ThrowsError')
    throwsError = ThrowsError()
    noException = True
    try:
        throwsError.go()
    except Exception as e:
        noException = False
        print('caught successfully raised exception', e)
        traceback.print_exc()  # e.printstacktrace()
    assert(not noException)
    print('Note that it\'s normal this throws an exception. It\'s a test of exception throwing :-)')
def Example1():
    # init
    model_class = PyTorchHelpers.load_lua_class("ModelExample.lua", 'ModelExample')
    torch_model = model_class("cuda", 0.001)
    torch_model.build_model((3, 100, 100), 12, 5)
    torch_model.init_model()
    torch_model.show_model()

    # define inputs/labels
    img = np.ones((2, 3, 100, 100), dtype=np.float32)
    img[0, :, :, :] = -0.5
    img[1, :, :, :] = 0.5
    label = np.ones((2, 2), dtype=np.float32)
    label[0, :] = 0
    return torch_model, img, label
def __init__(self, camera, model_path, object_width=0):
    self.image_size = None
    self.tracker_model = None
    self.translation_range = None
    self.rotation_range = None
    self.mean = None
    self.std = None
    self.debug_rgb = None
    self.debug_background = None
    self.camera = camera
    self.object_width = object_width
    # setup model
    model_class = PyTorchHelpers.load_lua_class(model_path, 'RGBDTracker')
    self.tracker_model = model_class('cuda', 'adam', 1)
    self.input_buffer = None
    self.prior_buffer = None
def __init__(self, camera, model_path, object_width=0, model_3d_path="", model_3d_ao_path="", shader_path=""):
    self.image_size = None
    self.tracker_model = None
    self.translation_range = None
    self.rotation_range = None
    self.mean = None
    self.std = None
    self.debug_rgb = None
    self.debug_background = None
    self.camera = camera
    self.object_width = object_width
    # setup model
    model_class = PyTorchHelpers.load_lua_class(model_path, 'RGBDTracker')
    self.tracker_model = model_class('cuda')
    if model_3d_path != "" and model_3d_ao_path != "" and shader_path != "":
        self.setup_renderer(model_3d_path, model_3d_ao_path, shader_path)
    self.input_buffer = None
    self.prior_buffer = None
django.setup()

from django.conf import settings
from chat.utils import log_to_terminal
from chat.models import Job, Dialog
import chat.constants as constants
import PyTorch
import PyTorchHelpers
import pika
import time
import yaml
import json
import traceback

VisDialModel = PyTorchHelpers.load_lua_class(constants.VISDIAL_LUA_PATH, 'VisDialTorchModel')

VisDialATorchModel = VisDialModel(
    constants.VISDIAL_CONFIG['input_json'],
    constants.VISDIAL_CONFIG['load_path'],
    constants.VISDIAL_CONFIG['beamSize'],
    constants.VISDIAL_CONFIG['beamLen'],
    constants.VISDIAL_CONFIG['sampleWords'],
    constants.VISDIAL_CONFIG['temperature'],
    constants.VISDIAL_CONFIG['gpuid'],
    constants.VISDIAL_CONFIG['backend'],
    constants.VISDIAL_CONFIG['proto_file'],
    constants.VISDIAL_CONFIG['model_file'],
    constants.VISDIAL_CONFIG['maxThreads'],
    constants.VISDIAL_CONFIG['encoder'],
    constants.VISDIAL_CONFIG['decoder'],
def create():
    TorchModel = PyTorchHelpers.load_lua_class(
        'pose-hg-demo/python/TorchModel.lua', 'TorchModel')
    TorchModel = TorchModel()
    return TorchModel
#
# Copyright © 2016 Changxu Wang <*****@*****.**>
#
# Distributed under terms of the MIT license.

import PyTorchHelpers
import os
import sys
import cv2
import numpy

basedir = os.path.dirname(os.path.realpath(__file__))
model_name = 'model-34.t7'

Model = PyTorchHelpers.load_lua_class('model.lua', 'Model')
model = Model(model_name)


def locate(images):
    resized_images = [cv2.resize(image, (448, 224)) for image in images]
    input_images = numpy.array(resized_images, dtype=numpy.float32) / 255.0
    output = model.forward(input_images).asNumpyTensor()
    output_images = []
    for i, image in enumerate(images):
        height, width = image.shape[:2]
        keypoints = output[i].reshape((4, 2))
        keypoints[:, 0] *= width
        keypoints[:, 1] *= height
        keypoints = numpy.round(keypoints)
def loadModels():
    global Classify
    Classify = PyTorchHelpers.load_lua_class('nnets/classify.lua', 'Classify')
import PyTorchHelpers
import os

ESCAPE_KEY = 1048603
UNITY_DEMO = False

if __name__ == '__main__':
    class_path = "deeptracking/tracker/rgbd_tracker.lua"
    model_class = PyTorchHelpers.load_lua_class(class_path, 'RGBDTracker')
    tracker_model = model_class('cuda')

    model_path = "/home/mathieu/Dataset/DeepTrack/model/mixed_skull"
    input_model = "mixed_skull5"
    output_model = "mixed_skull_cpu"

    tracker_model.load(os.path.join(model_path, input_model))
    tracker_model.convert_backend("cpu")
    tracker_model.save(os.path.join(model_path, output_model))
from grad_cam.utils import log_to_terminal
import grad_cam.constants as constants
import PyTorch
import PyTorchHelpers
import pika
import time
import yaml
import json
import traceback

# Close the database connection in order to make sure that MYSQL Timeout doesn't occur
django.db.close_old_connections()

# Loading the VQA Model forever
VQAModel = PyTorchHelpers.load_lua_class(constants.VQA_LUA_PATH, 'VQATorchModel')
VqaTorchModel = VQAModel(
    constants.VQA_CONFIG['proto_file'],
    constants.VQA_CONFIG['model_file'],
    constants.VQA_CONFIG['input_sz'],
    constants.VQA_CONFIG['backend'],
    constants.VQA_CONFIG['layer_name'],
    constants.VQA_CONFIG['model_path'],
    constants.VQA_CONFIG['input_encoding_size'],
    constants.VQA_CONFIG['rnn_size'],
    constants.VQA_CONFIG['rnn_layers'],
    constants.VQA_CONFIG['common_embedding_size'],
    constants.VQA_CONFIG['num_output'],
    constants.VQA_CONFIG['seed'],
    constants.VQA_GPUID,
)
from django.conf import settings
import grad_cam.constants as constants
import PyTorch
import PyTorchHelpers

# Loading the classification model forever
ClassificationModel = PyTorchHelpers.load_lua_class(constants.CLASSIFICATION_LUA_PATH, 'ClassificationTorchModel')
ClassificationTorchModel = ClassificationModel(
    constants.CLASSIFICATION_CONFIG['proto_file'],
    constants.CLASSIFICATION_CONFIG['model_file'],
    constants.CLASSIFICATION_CONFIG['backend'],
    constants.CLASSIFICATION_CONFIG['input_sz'],
    constants.CLASSIFICATION_CONFIG['layer_name'],
    constants.CLASSIFICATION_CONFIG['seed'],
    settings.GPUID,
)

# Loading the VQA Model forever
VQAModel = PyTorchHelpers.load_lua_class(constants.VQA_LUA_PATH, 'VQATorchModel')
VqaTorchModel = VQAModel(
    constants.VQA_CONFIG['proto_file'],
    constants.VQA_CONFIG['model_file'],
    constants.VQA_CONFIG['input_sz'],
    constants.VQA_CONFIG['backend'],
    constants.VQA_CONFIG['layer_name'],
    constants.VQA_CONFIG['model_path'],
    constants.VQA_CONFIG['input_encoding_size'],
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'vqa.settings')

from django.conf import settings
from demo.utils import log_to_terminal
import demo.constants as constants
import PyTorch
import PyTorchHelpers
import pika
import time
import yaml
import json

# Loading the VQA Model forever
VQAModel = PyTorchHelpers.load_lua_class(constants.VQA_LUA_PATH, 'HieCoattModel')
VqaTorchModel = VQAModel(
    constants.VQA_CONFIG['vqa_model'],
    constants.VQA_CONFIG['cnn_proto'],
    constants.VQA_CONFIG['cnn_model'],
    constants.VQA_CONFIG['json_file'],
    constants.VQA_CONFIG['backend'],
    constants.VQA_CONFIG['gpuid'],
)

connection = pika.BlockingConnection(
    pika.ConnectionParameters(host=settings.PIKA_HOST))
channel = connection.channel()
    # load test data
    d = loadPickle(join(data_dir, 'test_batch'))
    NTest = d['data'].shape[0]
    testData = np.zeros((NTest, inputPlanes, inputWidth, inputHeight), np.float32)
    testLabels = np.zeros(NTest, np.uint8)
    data = d['data'].reshape(dataLength, inputPlanes, inputWidth, inputHeight)
    testData[:] = data
    testLabels[:] = d['labels']
    return NTrain, trainData, trainLabels, NTest, testData, testLabels


# load the lua class
ResidualTrainer = PyTorchHelpers.load_lua_class('residual_trainer.lua', 'ResidualTrainer')
residualTrainer = ResidualTrainer(num_layer_groups)
if loadFrom is not None:
    residualTrainer.loadFrom(loadFrom)
print('residualTrainer', residualTrainer)

NTrain, trainData, trainLabels, NTest, testData, testLabels = loadData(
    data_dir, num_datafiles)
print('data loaded :-)')

# I think the mean and std are over all data, altogether, not specific to planes or pixel location?
mean = trainData.mean()
std = trainData.std()
trainData -= mean
def torch_net():
    import PyTorchHelpers
    TorchTrainer = PyTorchHelpers.load_lua_class('%s/TorchTrainer.lua' % opt.model_dir, 'TorchTrainer')
    net = TorchTrainer(vars(opt))
    return net
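# Hypothetical setup (not in the original): torch_net() expects a module-level `opt`
# namespace with at least a model_dir attribute; argparse is one way to build it.
# The Lua-side TorchTrainer constructor will likely expect more options than shown here.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--model_dir', default='.', help='directory containing TorchTrainer.lua')
opt = parser.parse_args()

net = torch_net()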
import django
django.setup()

from django.conf import settings
from amt.utils import log_to_terminal
import amt.constants as constants
import PyTorch
import PyTorchHelpers
import pika
import time
import yaml
import json
import traceback

RLVisDialModel = PyTorchHelpers.load_lua_class(constants.RL_VISDIAL_LUA_PATH, 'RLConversationModel')

RLVisDialATorchModel = RLVisDialModel(
    constants.RL_VISDIAL_CONFIG['inputJson'],
    constants.RL_VISDIAL_CONFIG['qBotpath'],
    constants.RL_VISDIAL_CONFIG['aBotpath'],
    constants.RL_VISDIAL_CONFIG['gpuid'],
    constants.RL_VISDIAL_CONFIG['backend'],
    constants.RL_VISDIAL_CONFIG['imfeatpath'],
)

connection = pika.BlockingConnection(
    pika.ConnectionParameters(host='localhost'))
channel = connection.channel()
def epochToLearningRate(epoch):
    if epoch < 100:
        return opt.base_lr
    if epoch < 200:
        return opt.base_lr / 10.0
    return opt.base_lr / 100.0


#----------------------------------------------------------------#
train_data, val_data, _, img_mean = load_imsitu(opt.data_dir)
_, train_images, train_labels = train_data
_, val_images, val_labels = val_data
opt.img_mean = img_mean

TorchTrainer = PyTorchHelpers.load_lua_class('%s/TorchTrainer.lua' % model_dir, 'TorchTrainer')
net = TorchTrainer(vars(opt))

n_train, n_val = train_images.shape[0], val_images.shape[0]
n_train_batches = n_train / batch_size
n_val_batches = n_val / batch_size

epoch = net.epoch

# +objs
opt.pad = 1

while True:
    if epoch % opt.eval_val_every == 0 and epoch > net.epoch:
        val_truth, val_pred = [], []
from __future__ import print_function, division

import PyTorch
import PyTorchHelpers
import numpy as np

TorchModel = PyTorchHelpers.load_lua_class('openfacePyTorch.lua', 'TorchModel')
torchModel = TorchModel(96)

img = np.ones([100, 3, 96, 96], np.float32)
print(img.shape)
out = torchModel.predict(img)
print(out.asNumpyTensor().shape)
from django.conf import settings
from grad_cam.utils import log_to_terminal
from grad_cam.models import ClassificationJob
import grad_cam.constants as constants
import PyTorch
import PyTorchHelpers
import pika
import time
import yaml
import json
import traceback

# Close the database connection in order to make sure that MYSQL Timeout doesn't occur
django.db.close_old_connections()

ClassificationModel = PyTorchHelpers.load_lua_class(
    constants.CLASSIFICATION_LUA_PATH, 'ClassificationTorchModel')
ClassificationTorchModel = ClassificationModel(
    constants.CLASSIFICATION_CONFIG['proto_file'],
    constants.CLASSIFICATION_CONFIG['model_file'],
    constants.CLASSIFICATION_CONFIG['backend'],
    constants.CLASSIFICATION_CONFIG['input_sz'],
    constants.CLASSIFICATION_CONFIG['layer_name'],
    constants.CLASSIFICATION_CONFIG['seed'],
    constants.CLASSIFICATION_GPUID,
)

connection = pika.BlockingConnection(
    pika.ConnectionParameters(host='localhost'))
channel = connection.channel()
from chat.utils import log_to_terminal
from chat.models import Job, Dialog
import chat.constants as constants
import PyTorch
import PyTorchHelpers
import pika
import time
import yaml
import json
import traceback

django.db.close_old_connections()

CaptioningModel = PyTorchHelpers.load_lua_class(
    constants.CAPTIONING_LUA_PATH, 'CaptioningTorchModel')
CaptioningTorchModel = CaptioningModel(
    constants.CAPTIONING_CONFIG['model_path'],
    constants.CAPTIONING_CONFIG['backend'],
    constants.CAPTIONING_CONFIG['input_sz'],
    constants.CAPTIONING_CONFIG['layer'],
    constants.CAPTIONING_CONFIG['seed'],
    constants.CAPTIONING_GPUID,
)

connection = pika.BlockingConnection(pika.ConnectionParameters(
    host='localhost'))
channel = connection.channel()

channel.queue_declare(queue='visdial_captioning_task_queue', durable=True)
'''

from oracle import Oracle
from state import State
from action import Action
from variables import Variables
from history import History
from node import Node
from rules import Rules
from relations import Relations

import PyTorch
import PyTorchHelpers
import numpy as np
import copy

Classify = PyTorchHelpers.load_lua_class('nnets/classify.lua', 'Classify')


class TransitionSystem:

    def __init__(self, embs, data, stage, model_dir=None):
        if model_dir is not None:
            self._classify = Classify(model_dir)
            self._labels = [item.strip() for item in open(model_dir + "/relations.txt").read().splitlines()]
        else:
            self._labels = None

        if stage == "ORACLETEST":
            assert(len(data) == 4)
            hooks = False
            tokens, dependencies, relations, alignments = data
            lemmas = None
from demo.utils import log_to_terminal
import demo.constants as constants
import PyTorch
import PyTorchHelpers
import pika
import time
import yaml
import json
import traceback
import os

print constants.DBS_CONFIG
print constants.DBS_GPUID

# Loading the DBS Model forever
DBSModel = PyTorchHelpers.load_lua_class(constants.DBS_LUA_PATH, 'DBSTorchModel')
DBSTorchModel = DBSModel(
    constants.DBS_CONFIG['model'],
    constants.DBS_CONFIG['batch_size'],
    # constants.DBS_CONFIG['num_images'],
    constants.DBS_CONFIG['language_eval'],
    constants.DBS_CONFIG['dump_images'],
    constants.DBS_CONFIG['dump_json'],
    constants.DBS_CONFIG['dump_json_postfix'],
    constants.DBS_CONFIG['dump_path'],
    # constants.DBS_CONFIG['B'],
    # constants.DBS_CONFIG['M'],
    # constants.DBS_CONFIG['lambda'],
    constants.DBS_CONFIG['divmode'],
    constants.DBS_CONFIG['temperature'],
    # constants.DBS_CONFIG['primetext'],