def __init__(self, datasetFile, textDir, checking_folder, lang, client_txt, pre_trained_gen, pre_trained_disc, ID, batch_size=1):
    """Restore a pre-trained GAN pair and record run configuration.

    Loads generator and discriminator checkpoints into DataParallel-wrapped
    CUDA models, then builds the text vectorizer from the training corpus.
    """
    # Generator: instantiate on GPU, wrap for multi-GPU, restore weights.
    gen_net = gan_factory.generator_factory('gan').cuda()
    self.generator = torch.nn.DataParallel(gen_net)
    self.generator.load_state_dict(torch.load(pre_trained_gen))

    # Discriminator: same pattern as the generator.
    disc_net = gan_factory.discriminator_factory('gan').cuda()
    self.discriminator = torch.nn.DataParallel(disc_net)
    self.discriminator.load_state_dict(torch.load(pre_trained_disc))

    # Run configuration passed through from the caller.
    self.checking_folder = checking_folder
    self.lang = lang
    self.client_txt = client_txt
    self.filename = ID
    self.batch_size = batch_size

    # Vocabulary/vectorizer built from the training text corpus.
    corpus_loader = CorpusLoader(datasetFile=datasetFile, textDir=textDir)
    self.vectorizer = corpus_loader.TrainVocab()
def __init__(self, datasetFile, imagesDir, textDir, split, arrangement, sampling):
    """Record dataset locations and sampling config, then prepare vocab.

    Wraps the `arrangement` and `sampling` dicts for attribute-style access,
    assigns image classes, and builds the text vectorizer from the corpus.
    """
    # Dataset locations and split selection.
    self.datasetFile = datasetFile
    self.imagesDir = imagesDir
    self.textDir = textDir
    self.split = split

    # Wrap plain dict configs so fields can be read as attributes.
    self.arrangement = easydict.EasyDict(arrangement)
    self.sampling = easydict.EasyDict(sampling)

    # Image-to-class mapping; presumably filled in by assign_classes() — confirm.
    self.images_classes = {}
    self.assign_classes()

    # Vocabulary/vectorizer built from the training text corpus.
    corpus_loader = CorpusLoader(datasetFile=datasetFile, textDir=textDir)
    self.vectorizer = corpus_loader.TrainVocab()