import sys

import torch
import matplotlib.pyplot as plt
from sklearn.cluster import DBSCAN
from sklearn.manifold import TSNE

from sc_dm.datasets import *
from ptsdae.sdae import StackedDenoisingAutoEncoder as SDAE

if __name__ == '__main__':
    # #########################################################################
    dset = sys.argv[1]
    #raw_data = DuoBenchmark('data/datasets/'+dset+'.csv')
    raw_data = FromPickle('data/embeddings/mouse-pca-15000-log1p-True.pickle')

    model = SDAE([raw_data.dims, 7500, 500, 2000, 50])
    #model.load_state_dict(torch.load('data/models/'+dset+'.pt'))
    model.load_state_dict(torch.load(sys.argv[1]))

    # PyTorch 0.3 still requires wrapping tensors in a Variable
    if int(torch.__version__.split('.')[1]) == 3:
        var = torch.autograd.variable.Variable(torch.Tensor(raw_data.data))
    else:
        var = torch.Tensor(raw_data.data)

    # encode the data, cluster the embedding, and project it to 2-D
    embedding = model.encoder(var).data.numpy()
    labels = DBSCAN().fit(embedding).labels_
    tsne_embedding = TSNE(n_components=2).fit_transform(embedding)

    # #########################################################################
    # plot the t-SNE projection coloured by DBSCAN cluster and save it
    plt_file = 'data/plots/mouse_SDAE.pdf'
    plt.scatter(tsne_embedding[:, 0],
                tsne_embedding[:, 1],
                c=labels, s=2)
    plt.savefig(plt_file)
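# ---------------------------------------------------------------------------
# The script above only needs two things from the sc_dm.datasets classes: a
# `.data` matrix (cells x features) and a `.dims` feature count. The class
# below is a minimal, hypothetical stand-in for FromPickle written under that
# assumption; the project's real implementation may differ.
import pickle

class FromPickleSketch:
    """Load a pre-computed cell x feature matrix from a pickle file."""

    def __init__(self, path):
        with open(path, 'rb') as fh:
            self.data = pickle.load(fh)   # 2-D array: cells x features
        self.dims = self.data.shape[1]    # input width for the SDAE
# ---------------------------------------------------------------------------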
ds_path = os.path.join('data/datasets', ds_name + '.csv')
dataset = DuoBenchmark(ds_path, log1p=log, split_head=False)

for scale in [True]:
    # Do scaling second as the function will overwrite the existing data
    # yes - yes I know this is bad design but it's too late now
    mlist = model_dict[ds_name][log][scale]

    # Given all of the pre-existing conditions, cycle through each of the
    # models that match these criteria
    for model in mlist:
        filename = model[0]
        print(filename)

        if scale:
            scale_dataset(dataset)

        # get parameter information
        model_path = os.path.join(model_dir, filename)
        layers = model[1]

        # prepare the model
        model = SDAE([dataset.dims] + layers)
        model.load_state_dict(
            torch.load(model_path, map_location='cpu'))

        # generate the embedding
        inputs = torch.Tensor(dataset.data)
        embedding = model.encoder(inputs).data.numpy()

        # save the embedding
        with open(os.path.join('data/sdae_embeddings',
                               filename + '.pickle'), 'wb') as fh:
            pickle.dump(embedding, fh, protocol=4)
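# ---------------------------------------------------------------------------
# The loop above assumes model_dict is keyed as
#   dataset name -> log1p flag -> scaling flag -> list of (filename, layers),
# where `layers` holds the SDAE hidden-layer sizes that get appended to
# dataset.dims. The entry below is purely illustrative; the real file names
# and layer sizes come from the training runs, not from this example.
model_dict_example = {
    'koh': {                                     # dataset name (hypothetical)
        True: {                                  # log1p applied
            True: [                              # dataset scaled
                ('koh-log1p-scaled-sdae.pt', [7500, 500, 2000, 50]),
            ],
        },
    },
}
# ---------------------------------------------------------------------------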