#check_embeddings_consistency(m) save_embeddings_to_file(m) #build_database_from_scratch(m) exit() encoder = m.load_currently_selected_model() encoder.state['dictionary'] = m.folders['binarized'] + 'dict.pkl' with open( m.folders['current_version'] + '2016-12-28_07:19:58_43.184_model.pkl', 'wb') as f: dill.dump(encoder, f, protocol=dill.HIGHEST_PROTOCOL) m.save_state(encoder.state) exit() m = model_manager.ModelManager('ubuntu_vhred_vanilla') encoder = m.load_currently_selected_model() print encoding_tools.create_model_specific_encoding_hash(encoder) with open( m.folders['model_versions'] + '2016-12-28_07:19:58_43.184_model.pkl', 'wb') as f: dill.dump(encoder, f, protocol=dill.HIGHEST_PROTOCOL) exit() #with open('../models/test/model_versions/2017-06-01_09:51:00_inf_model.pkl', 'rb') as f: with open('./models/test/model_versions/2017-06-09_12:06:14_inf_model.pkl', 'rb') as f:
# Environment/bookkeeping for the ImageNet AlexNet 4-bit distillation run.
USE_CUDA = torch.cuda.is_available()
NUM_GPUS = len(cuda_devices)  # NOTE(review): cuda_devices must be defined earlier in the file

# Create data/model folders if missing.  os.makedirs(..., exist_ok=True)
# replaces the original bare "try: os.mkdir(...) except: pass", which
# silently swallowed *all* errors (including permission failures).
os.makedirs(datasets.BASE_DATA_FOLDER, exist_ok=True)
os.makedirs(SAVED_MODELS_FOLDER, exist_ok=True)

epochsToTrainImageNet = 90
imageNet12modelsFolder = os.path.join(SAVED_MODELS_FOLDER, 'imagenet12_new')

# Re-open the existing manager and report last/best prediction accuracy for
# every model that has at least one completed training run.
imagenet_manager = model_manager.ModelManager(
    'model_manager_imagenet_Alexnet_distilled4bits.tst', 'model_manager',
    create_new_model_manager=False)
for x in imagenet_manager.list_models():
    if imagenet_manager.get_num_training_runs(x) >= 1:
        # Hoist the metadata load: the original called load_metadata(x) twice.
        acc_history = imagenet_manager.load_metadata(x)[1]['predictionAccuracy']
        s = '{}; Last prediction acc: {}, Best prediction acc: {}'.format(
            x, acc_history[-1], max(acc_history))
        print(s)
os.makedirs(imageNet12modelsFolder, exist_ok=True)
def _get_model(self):
    """Construct the ModelManager configured by this project's settings."""
    # Imported lazily so the module is only loaded when a model is requested.
    import model_manager
    settings = self.ProjectParameters["model_settings"]
    return model_manager.ModelManager(settings)
# NOTE(review): the first statement below is truncated — the beginning of the
# enclosing correct() function lies before this chunk; reconstructed as-is.
        edits1(word), NWORDS) or known_edits2(word, NWORDS) or [word]
    return max(candidates, key=NWORDS.get)


def get_spell_corrector(model_manager):
    # Load the pickled word-frequency table persisted by the model manager and
    # return a closure that spell-corrects a single word against it.
    assert os.path.exists(model_manager.files['frequencies'])
    with open(model_manager.files['frequencies'], 'rb') as f:
        NWORDS = cPickle.load(f)  # Python 2 cPickle; trusted local file

    def wrapper(some_word):
        return correct(some_word, NWORDS)

    return wrapper


if __name__ == '__main__':
    logging.basicConfig(
        level=logging.DEBUG,
        format="%(asctime)s: %(name)s: %(levelname)s: %(message)s")
    m = model_manager.ModelManager('test')
    corr = get_spell_corrector(m)
    # Python 2 print statements — sample corrections of misspelled Dutch words.
    print corr('hoik')
    print corr('smakelig')
    print corr('dankjeiwel')
# Paths and environment for the CIFAR-10 distillation experiments.
datasets.BASE_DATA_FOLDER = '/workspace/mnt/group/video/zhaozhijian/dataset'
SAVED_MODELS_FOLDER = 'models'
USE_CUDA = torch.cuda.is_available()
# Raises KeyError if CUDA_VISIBLE_DEVICES is unset — same as the original.
print('CUDA_VISIBLE_DEVICES: {}'.format(os.environ['CUDA_VISIBLE_DEVICES']))

# Create folders if missing.  Replaces the original bare "except: pass",
# which hid every error, not just "already exists".
os.makedirs(datasets.BASE_DATA_FOLDER, exist_ok=True)
os.makedirs(SAVED_MODELS_FOLDER, exist_ok=True)

# Re-open the existing manager and print the latest accuracy of each trained model.
cifar10Manager = model_manager.ModelManager('model_manager_cifar10.tst',
                                            'model_manager',
                                            create_new_model_manager=False)
cifar10modelsFolder = os.path.join(SAVED_MODELS_FOLDER, 'cifar10')
for x in cifar10Manager.list_models():
    if cifar10Manager.get_num_training_runs(x) >= 1:
        print(x, cifar10Manager.load_metadata(x)[1]['predictionAccuracy'][-1])
os.makedirs(cifar10modelsFolder, exist_ok=True)

epochsToTrainCIFAR = 1
USE_BATCH_NORM = True
AFFINE_BATCH_NORM = True
TRAIN_TEACHER_MODEL = True
# Load CIFAR-10 train/test loaders and build the teacher network.
train_loader, test_loader = cifar10.getTrainLoader(batch_size), cifar10.getTestLoader(batch_size)
import cnn_models.conv_forward_model as convForwModel
import cnn_models.help_fun as cnn_hf
teacherModel = convForwModel.ConvolForwardNet(**convForwModel.teacherModelSpec,
                                              useBatchNorm=True,
                                              useAffineTransformInBatchNorm=True)
#convForwModel.train_model(teacherModel, train_loader, test_loader, epochs_to_train=20)
# NOTE(review): the imports and teacher construction below duplicate the ones
# above — likely two notebook cells pasted together.
import cnn_models.conv_forward_model as convForwModel
import cnn_models.help_fun as cnn_hf
import model_manager
model_manager_path = 'model_manager_cifar10.tst'
model_save_path ='models'
# __mkdir: helper defined elsewhere in this file — presumably mkdir-if-missing; confirm.
__mkdir(model_save_path)
# Re-open the manager if its backing file exists, otherwise create a new one.
if os.path.exists(model_manager_path):
    cifar10Manager = model_manager.ModelManager('model_manager_cifar10.tst',
                                                'model_manager',
                                                create_new_model_manager=False)#the first t
else:
    cifar10Manager = model_manager.ModelManager('model_manager_cifar10.tst',
                                                'model_manager',
                                                create_new_model_manager=True)#the first t
model_name = 'cifar10_teacher'
teacherModelPath = os.path.join(model_save_path, model_name)
teacherModel = convForwModel.ConvolForwardNet(**convForwModel.teacherModelSpec,
                                              useBatchNorm=True,
                                              useAffineTransformInBatchNorm=True)
print(cifar10Manager.saved_models)
#convForwModel.train_model(teacherModel, train_loader, test_loader, epochs_to_train=2)
# NOTE(review): the add_new_model(...) call is truncated — its remaining
# arguments lie beyond this chunk of the file.
if not model_name in cifar10Manager.saved_models:
    cifar10Manager.add_new_model(model_name, teacherModelPath,
# Flags controlling which WMT13 training/evaluation stages run.
TRAIN_SEQUENCE_DISTILLED = False
TRAIN_WORD_DISTILLED = False
TRAIN_QUANTIZED_DISTILLED = False
TRAIN_DIFFERENTIABLE_QUANTIZATION = False
COMPUTE_BLEU_MODELS = True
CHECK_PM_QUANTIZATION = True

# Create data/model folders if missing (replaces the original bare
# "except: pass", which silently swallowed all errors).
os.makedirs(datasets.BASE_DATA_FOLDER, exist_ok=True)
os.makedirs(SAVED_MODELS_FOLDER, exist_ok=True)

epochsToTrainOnmtIntegDataset = 15
# Re-open the existing WMT13 manager and print the latest perplexity per model.
onmtManager = model_manager.ModelManager('model_manager_WMT13.tst', 'model_manager',
                                         create_new_model_manager=False)
for x in onmtManager.list_models():
    if onmtManager.get_num_training_runs(x) > 0:
        print(x, onmtManager.load_metadata(x)[1]['perplexity'][-1])
WMT13_saved_models_folder = os.path.join(SAVED_MODELS_FOLDER, 'WMT13')
os.makedirs(WMT13_saved_models_folder, exist_ok=True)

#load the data
batch_size = 64 * NUM_GPUS
# NOTE(review): this check can never fire — batch_size is defined as a
# multiple of NUM_GPUS on the line above.  Kept for parity with sibling scripts.
if batch_size % NUM_GPUS != 0:
    raise ValueError('Batch size: {} must be a multiple of the number of gpus:{}'.format(batch_size, NUM_GPUS))
transl_dataset = datasets.WMT13_DE_EN(pin_memory=True)
# Output folder for training artifacts; tolerate a pre-existing directory.
try:
    os.makedirs(opt.outf)
except OSError:
    pass

# =============================================================================
# manager
# =============================================================================
model_manager_path = 'model_manager_pointnetcls.tst'
manager_name = "mg_pointnetcls"
model_save_path = 'models'
# __mkdir: helper defined elsewhere in this file — presumably mkdir-if-missing.
__mkdir(model_save_path)
# Open the manager, creating it only when its backing file is missing.
# Collapses the original duplicated if/else construction into one call.
pointNetManager = model_manager.ModelManager(
    model_manager_path, manager_name,
    create_new_model_manager=not os.path.exists(model_manager_path))

# =============================================================================
# teacher
# =============================================================================
model_name = 'pointnetcls_teacher'
teacherModelPath = os.path.join(model_save_path, model_name)
classifier = pointnet.model.PointNetCls(
    k=num_classes, feature_transform=opt.feature_transform)
teacherModel = classifier
print(pointNetManager.saved_models)
# Environment/bookkeeping for the ImageNet resnet34double run.
USE_CUDA = torch.cuda.is_available()
NUM_GPUS = len(cuda_devices)  # NOTE(review): cuda_devices must be defined earlier in the file

# Create data/model folders if missing (replaces the original bare
# "except: pass", which silently swallowed all errors).
os.makedirs(datasets.BASE_DATA_FOLDER, exist_ok=True)
os.makedirs(SAVED_MODELS_FOLDER, exist_ok=True)

epochsToTrainImageNet = 90
imageNet12modelsFolder = os.path.join(SAVED_MODELS_FOLDER, 'imagenet12_new')

# Re-open the existing manager and report last/best prediction accuracy for
# every model that has at least one completed training run.
imagenet_manager = model_manager.ModelManager(
    'model_manager_resnet34double.tst', 'model_manager',
    create_new_model_manager=False)
for x in imagenet_manager.list_models():
    if imagenet_manager.get_num_training_runs(x) >= 1:
        # Hoist the metadata load: the original called load_metadata(x) twice.
        acc_history = imagenet_manager.load_metadata(x)[1]['predictionAccuracy']
        s = '{}; Last prediction acc: {}, Best prediction acc: {}'.format(
            x, acc_history[-1], max(acc_history))
        print(s)
os.makedirs(imageNet12modelsFolder, exist_ok=True)
# Ensure output folders exist; best-effort mkdir in this file's existing style
# (the bare except swallows all errors, not just "already exists").
try:
    os.mkdir(SAVED_MODELS_FOLDER)
except:
    pass
try:
    os.mkdir(MANAGER_FOLDER)
except:
    pass
manager_path = os.path.join(MANAGER_FOLDER, args.manager + '.tst')
# Create a brand-new manager only when no backing file exists yet.
create_new = True
if os.path.exists(manager_path):
    create_new = False
Manager = model_manager.ModelManager(manager_path, args.manager,
                                     create_new_model_manager=create_new)
modelsFolder = os.path.join(SAVED_MODELS_FOLDER, args.data)
try:
    os.mkdir(modelsFolder)
except:
    pass
epochsToTrainCIFAR = args.epochs
USE_BATCH_NORM = True
AFFINE_BATCH_NORM = True
# Select the dataset from the command-line argument.
if args.data == 'cifar10':
    data = datasets.CIFAR10()
# NOTE(review): truncated here — the cifar100 branch continues beyond this chunk.
elif args.data == 'cifar100':
# Imports and path setup for the Bayesian distillation CIFAR-10 run.
import torch
import os
import datasets
import cnn_models.conv_forward_model as convForwModel
import cnn_models.help_fun as cnn_hf
import quantization
import pickle
import copy
import quantization.help_functions as qhf
import functools
import helpers.functions as mhf
import model_manager  # BUGFIX: model_manager is used below but was never imported

datasets.BASE_DATA_FOLDER = '/home/fuhao/workspace-4gpu/data'
SAVED_MODELS_FOLDER = '/home/fuhao/workspace-4gpu/projects/quantization/Bayesian_quant/models/'

# NOTE(review): the manager's backing path under ./models/... is opened before
# the directories below are created — presumably ModelManager makes its own
# parent directory; confirm.
cifar10Manager = model_manager.ModelManager(
    './models/Bayesian_distill_cifar10/model_manager_Bayesian_distill_cifar10.tst',
    'model_manager', create_new_model_manager=True)
cifar10modelsFolder = os.path.join(SAVED_MODELS_FOLDER, 'Bayesian_distill_cifar10')

# Create data/model folders if missing (replaces the original bare
# "except: pass", which silently swallowed all errors).
os.makedirs(datasets.BASE_DATA_FOLDER, exist_ok=True)
os.makedirs(SAVED_MODELS_FOLDER, exist_ok=True)
os.makedirs(cifar10modelsFolder, exist_ok=True)
# NOTE(review): placeholder path — must be filled in before running.
SAVED_MODELS_FOLDER = '...'
USE_CUDA = torch.cuda.is_available()
# Best-effort folder creation in this file's existing style (bare except
# swallows all errors, not just "already exists").
try:
    os.mkdir(datasets.BASE_DATA_FOLDER)
except:
    pass
try:
    os.mkdir(SAVED_MODELS_FOLDER)
except:
    pass
epochsToTrainOnmtIntegDataset = 13
# Re-open the existing multi30k manager and print the latest perplexity per model.
onmtManager = model_manager.ModelManager('model_manager_multi30k_dataset.tst',
                                         'model_manager',
                                         create_new_model_manager=False)
for x in onmtManager.list_models():
    if onmtManager.get_num_training_runs(x) > 0:
        print(x, onmtManager.load_metadata(x)[1]['perplexity'][-1])
multi30k_saved_models_folder = os.path.join(SAVED_MODELS_FOLDER, 'multi30k')
try:
    os.mkdir(multi30k_saved_models_folder)
except:
    pass
#load the data
batch_size = 256
transl_dataset = datasets.multi30k_DE_EN(pin_memory=True)
# NOTE(review): truncated here — the remaining arguments of getTrainLoader(
# continue beyond this chunk of the file.
train_loader, test_loader = transl_dataset.getTrainLoader(
# Environment/bookkeeping for the ImageNet distilled NUM_BITS-bit run.
USE_CUDA = torch.cuda.is_available()
NUM_GPUS = len(cuda_devices)  # NOTE(review): cuda_devices must be defined earlier in the file

# Create data/model folders if missing (replaces the original bare
# "except: pass", which silently swallowed all errors).
os.makedirs(datasets.BASE_DATA_FOLDER, exist_ok=True)
os.makedirs(SAVED_MODELS_FOLDER, exist_ok=True)

epochsToTrainImageNet = 90
imageNet12modelsFolder = os.path.join(SAVED_MODELS_FOLDER, 'imagenet12_new')

# Manager file name is parameterized by NUM_BITS (defined earlier in the file).
imagenet_manager = model_manager.ModelManager(
    'model_manager_imagenet_distilled_New{}bits.tst'.format(NUM_BITS),
    'model_manager', create_new_model_manager=False)
for x in imagenet_manager.list_models():
    if imagenet_manager.get_num_training_runs(x) >= 1:
        # Hoist the metadata load: the original called load_metadata(x) twice.
        acc_history = imagenet_manager.load_metadata(x)[1]['predictionAccuracy']
        s = '{}; Last prediction acc: {}, Best prediction acc: {}'.format(
            x, acc_history[-1], max(acc_history))
        print(s)
os.makedirs(imageNet12modelsFolder, exist_ok=True)
# Flags for the integ-dataset ONMT evaluation.
CHECK_PM_QUANTIZATION = True
COMPUTE_WORD_PERCENTAGE_SIMILARITY = True
# Best-effort folder creation in this file's existing style (bare except
# swallows all errors, not just "already exists").
try:
    os.mkdir(datasets.BASE_DATA_FOLDER)
except:
    pass
try:
    os.mkdir(SAVED_MODELS_FOLDER)
except:
    pass
epochsToTrainOnmtIntegDataset = 15
# Re-open the existing manager and print the latest perplexity per model.
onmtManager = model_manager.ModelManager('model_manager_integ_dataset.tst',
                                         'model_manager',
                                         create_new_model_manager=False)
for x in onmtManager.list_models():
    if onmtManager.get_num_training_runs(x) > 0:
        print(x, onmtManager.load_metadata(x)[1]['perplexity'][-1])
integ_dataset_saved_models_folder = os.path.join(SAVED_MODELS_FOLDER,
                                                 'integ_dataset')
try:
    os.mkdir(integ_dataset_saved_models_folder)
except:
    pass
#load the data
batch_size = 64 * NUM_GPUS
# NOTE(review): truncated here — the body of this check continues beyond this
# chunk of the file.
if batch_size % NUM_GPUS != 0: