# --- CLI arguments, RNG seeding, and ECG data-loader construction ---
# NOTE(review): the opening of the '-seed' add_argument call was truncated in
# the original source; reconstructed here from the surviving help string and
# the later use of args.seed (default 1) -- confirm against the full file.
parser.add_argument(
    '-seed', type=int, default=1, metavar='S',
    help='random seed(default:1)')
parser.add_argument(
    '-log-interval', type=int, default=1, metavar='N',
    help='how many batches to wait before logging training status')
args = parser.parse_args()

# Use CUDA only when it is available AND not explicitly disabled via the CLI.
args.cuda = not args.no_cuda and torch.cuda.is_available()

# Seed both the CPU and (when used) the GPU RNGs for reproducible runs.
torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)

# DataLoader tuning: worker processes and pinned host memory pay off only on
# GPU runs, hence the conditional.
kwargs = {'num_workers': 3, 'pin_memory': True} if args.cuda else {}

# BUG FIX: kwargs was built above but never passed to either DataLoader, so
# num_workers/pin_memory silently had no effect; now forwarded via **kwargs.
train_loader = torch.utils.data.DataLoader(
    DataUtils.ECGDataset(train_path, test_path, train=True),
    batch_size=args.train_batch_size,
    drop_last=True,
    shuffle=True,
    **kwargs)
test_loader = torch.utils.data.DataLoader(
    DataUtils.ECGDataset(train_path, test_path, test=True),
    batch_size=args.test_batch_size,
    drop_last=True,
    shuffle=True,
    **kwargs)

# Alternative model configurations, kept for reference:
#model = LSTM(28*28, 64, 10) MNIST dataset
#model = LSTM(140, 64, 5)
#model = FC(28 * 28, 300, 100, 10)
#model = TTRNN([4,7,4,7], [4,2,4,4], [1,3,4,2,1], 1, 0.8, 'ttgru')
#model = RNN([2,5,2,7], [4,4,2,4], [1,2,5,3,1], 0.8, 5)