def imshow(inp):
    """Imshow for Tensor.

    Converts a CHW image tensor to HWC, undoes ImageNet-style
    normalization (mean/std below), clips to [0, 1] and displays it.

    :param inp: image tensor of shape (C, H, W), normalized with the
        ImageNet mean/std. Assumed to be a CPU tensor — TODO confirm.
    """
    inp = inp.numpy().transpose((1, 2, 0))
    # ImageNet channel-wise statistics used by the training transforms
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    inp = std * inp + mean
    inp = np.clip(inp, 0, 1)
    plt.imshow(inp)
    plt.show()
# end imshow

# Argument parser
args = functions.argument_parser_training_model('image')

# Image augmentation and normalization
image_transforms = dict()
image_transforms['train'] = functions.image_transformer('train')
image_transforms['val'] = functions.image_transformer('val')

# Image data set
pan18loader_training, pan18loader_validation = functions.load_images_dataset(
    image_transforms, args.batch_size, args.val_batch_size
)

# Loss function
loss_function = nn.CrossEntropyLoss()
# You should have received a copy of the GNU General Public License
# along with Foobar. If not, see <http://www.gnu.org/licenses/>.
#

# Imports
import torch
from torchlanguage import models
import torch.nn as nn
from torch.autograd import Variable
from torch import optim
import copy
import os
from tools import functions, settings

# Parse argument
args = functions.argument_parser_training_model('tweet')

# Transformer (character n-gram tokenization for the selected language)
transformer = functions.tweet_transformer(args.lang, args.n_gram)

# Load data sets (PAN17 and PAN18 tweet corpora, training + validation)
pan17loader_training, pan17loader_validation, pan18loader_training, pan18loader_validation = \
    functions.load_tweets_dataset(args.lang, transformer, args.batch_size, args.val_batch_size)

# Loss function
loss_function = nn.CrossEntropyLoss()

# Model: character-level CNN over fixed-length tweet text
model = models.CNNCTweet(
    text_length=settings.min_length,
    vocab_size=settings.voc_sizes[args.n_gram][args.lang],
    embedding_dim=args.dim
)

# Move the model to GPU when requested
if args.cuda:
    # NOTE(review): the source chunk was truncated at this `if`; the
    # conventional body is model.cuda() — confirm against the full file.
    model.cuda()
# end if