def test_soft_clip(self):
    """SoftClip squashes waveform samples in [-1, 1] into (0, 1).

    The default constructor, an explicit integer ``1`` and a float ``1.``
    must all produce the same sigmoid-shaped mapping, and a non-numeric
    argument must be rejected with ``ValueError``.
    """
    wav = np.array([-1, -0.5, 0, 0.5, 1])
    expected = np.array(
        [0.26894142, 0.37754067, 0.5, 0.62245933, 0.73105858])
    # Default, int and float arguments are all equivalent.
    for clip in (transforms.SoftClip(),
                 transforms.SoftClip(1),
                 transforms.SoftClip(1.)):
        assert np.allclose(clip(wav), expected)
    # A non-numeric argument fails at construction time.
    with pytest.raises(ValueError):
        transforms.SoftClip('a')
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader
from torch.utils.data import random_split

import yews.datasets as dsets
import yews.transforms as transforms
from yews.models import Cpic
from yews.train import Trainer

# NOTE(review): CrossEntropyLoss, Cpic and Trainer are imported but not used
# in this chunk — presumably the training loop follows below; verify against
# the full file.

if __name__ == '__main__':
    # Preprocessing: zero-center each waveform, soft-clip its amplitude,
    # then convert it to a torch tensor for the model.
    waveform_transform = transforms.Compose([
        transforms.ZeroMean(),
        transforms.SoftClip(1e-4),
        transforms.ToTensor(),
    ])

    # Prepare dataset (downloaded into the current directory if missing).
    dset = dsets.Wenchuan(path='.', download=True,
                          sample_transform=waveform_transform)

    # Split datasets into training and validation (80/20).
    train_length = int(len(dset) * 0.8)
    val_length = len(dset) - train_length
    train_set, val_set = random_split(dset, [train_length, val_length])

    # Prepare dataloaders — larger batch and more workers for validation
    # since no gradients are needed there.
    train_loader = DataLoader(train_set, batch_size=100, shuffle=True,
                              num_workers=4)
    val_loader = DataLoader(val_set, batch_size=1000, shuffle=False,
                            num_workers=8)
# from yews.models import polarity_v1 # polarity=polarity_v1 from yews.models import fm_v1 from yews.models import fm_v2 focal_mechanism = fm_v2 if __name__ == '__main__': print("Now: start : " + str(datetime.datetime.now())) # Preprocessing waveform_transform = transforms.Compose([ transforms.ZeroMean(), transforms.SoftClip(1e-2), transforms.ToTensor(), ]) # Prepare dataset dsets.set_memory_limit(10 * 1024**3) # first number is GB # dset = dsets.Wenchuan(path='/home/qszhai/temp_project/deep_learning_course_project/cpic', download=False,sample_transform=waveform_transform) # dset = dsets.SCSN_polarity(path='/home/qszhai/temp_project/deep_learning_course_project/first_motion_polarity/scsn_data/npys', download=False, sample_transform=waveform_transform) # dset = dsets.Taiwan_focal_mechanism(path='/home/qszhai/temp_project/deep_learning_course_project/focal_mechanism/npys_for_focal_mechanism', download=False, sample_transform=waveform_transform) # # Split datasets into training and validation # train_length = int(len(dset) * 0.8) # val_length = len(dset) - train_length # train_set, val_set = random_split(dset, [train_length, val_length]) train_set = dsets.Taiwan_focal_mechanism(
def test_soft_clip(self):
    """Default SoftClip maps samples in [-1, 1] through a sigmoid curve."""
    wav = np.array([-1, -0.5, 0, 0.5, 1])
    squashed = transforms.SoftClip()(wav)
    expected = np.array(
        [0.26894142, 0.37754067, 0.5, 0.62245933, 0.73105858])
    assert np.allclose(squashed, expected)
from yews.train import Trainer #from yews.models import cpic from yews.models import cpic_v1 from yews.models import cpic_v2 from yews.models import cpic_v3 cpic = cpic_v3 if __name__ == '__main__': print("Now: start : " + str(datetime.datetime.now())) # Preprocessing waveform_transform = transforms.Compose([ transforms.ZeroMean(), transforms.SoftClip(1e-3), transforms.ToTensor(), ]) # Prepare dataset dsets.set_memory_limit(10 * 1024**3) # first number is GB dset = dsets.Wenchuan( path= '/home/qszhai/temp_project/deep_learning_course_project/cpic/wenchuan_data/train_npy', download=False, sample_transform=waveform_transform) # Split datasets into training and validation train_length = int(len(dset) * 0.8) val_length = len(dset) - train_length train_set, val_set = random_split(dset, [train_length, val_length])