import pickle
from os.path import basename, splitext

import torch

# Load checkpoint
print("Loading checkpoint from {}".format(checkpoint_path))
if use_cuda:
    checkpoint = torch.load(checkpoint_path)
else:
    # Map GPU-saved tensors onto the CPU when CUDA is unavailable
    checkpoint = torch.load(checkpoint_path,
                            map_location=lambda storage, loc: storage)
model.load_state_dict(checkpoint["state_dict"])
checkpoint_name = splitext(basename(checkpoint_path))[0]
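
# For reference, a checkpoint with the structure expected above could be
# produced by the training script with something like the following
# (a sketch; the original save code is not shown in this excerpt):
# torch.save({"state_dict": model.state_dict()}, checkpoint_path)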

with open('TestSignalList500.pkl', 'rb') as f:  # binary mode is required for pickle
    sequence_i_save, interf_i_save = pickle.load(f)
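
# TestSignalList500.pkl evidently holds a pair of index lists (target
# sequences, interferers); a sketch of how it could have been written
# (assumed, not shown in this excerpt):
# with open('TestSignalList500.pkl', 'wb') as f:
#     pickle.dump((sequence_i_save, interf_i_save), f)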

############################################################
from DataGenerator import dataGenBig, ExtractFeatureFromOneSignal
dg = dataGenBig(seedNum=123456789, verbose=False, verboseDebugTime=False)
# [aa, bb] = dg.myDataGenerator(0)  # change yield to return to debug the generator
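# Debugging note: with `yield` temporarily changed to `return`,
# myDataGenerator(0) runs as a plain function call, so a single batch can be
# inspected in a debugger instead of being consumed by the training loop.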

####################
SampleN = 100
for sample_i in range(50, SampleN):  # mixture/separated-signal groups 50..SampleN-1
    print("Sample number {}".format(sample_i + 1))
    sequence_i = sequence_i_save[sample_i]
    interf_i = interf_i_save[sample_i]

    target_path = dg.target_test[sequence_i]
    interf_path = dg.interf_test[interf_i]
    print(target_path, '\n', interf_path)

    # generate the mixture and features
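    # The excerpt ends here; presumably ExtractFeatureFromOneSignal (imported
    # above) is applied next, along the lines of (a sketch; the function's
    # signature is assumed, not shown in this excerpt):
    # features = ExtractFeatureFromOneSignal(target_path)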
############################################################
# Example 2
############################################################

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--debug', default=0, type=int)  # debug > 0 saves weights via TensorBoard
parser.add_argument('--save_dir', default=None)
parser.add_argument('--is_training', default=1, type=int)
parser.add_argument('-w', '--weights', default=None, help="The path of the saved weights")
parser.add_argument('--lr', default=0.001, type=float, help="Initial learning rate")
parser.add_argument('--lr_decay', default=0.98, type=float, help="The value multiplied by lr at each epoch")
# Flags referenced later in this example but not defined in the excerpt; defaults assumed:
parser.add_argument('--continueToTrainFlag', default=0, type=int)
parser.add_argument('--newSeedNum', default=None, type=int)
parser.add_argument('--outputFlag', default=0, type=int)
args = parser.parse_args()
print(args)
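
# Example invocation (hypothetical script name and values):
#   python train.py --save_dir ./results --lr 0.001 --lr_decay 0.98
# Applied multiplicatively per epoch, the learning rate at epoch e is
# lr * lr_decay ** e, e.g. 0.001 * 0.98 ** 10 ≈ 0.00082 after 10 epochs.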


############################################################
from DataGenerator import dataGenBig as dataGenBig
# dg = dataGenBig()

if args.continueToTrainFlag:
    dg = dataGenBig(seedNum=args.newSeedNum, verbose=False, verboseDebugTime=False)
else:
    dg = dataGenBig(seedNum=123456789, verbose=False, verboseDebugTime=False)
# [aa, bb] = dg.myDataGenerator(0)  # change yield to return to debug the generator


from LossFuncs import my_loss as customLoss


if args.outputFlag == 0:
    from GenerateModels import EncoderNetBigMel as GenerateModel
elif args.outputFlag == 1:
    from GenerateModels import EncoderNetBigLinear as GenerateModel
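
# Both branches bind the selected architecture to the same name, so the rest
# of the script can build the network without checking the flag again, e.g.
# (a sketch; the constructor's signature is assumed):
# model = GenerateModel()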

############################################################
# Example 3
############################################################

from tensorflow.python.client import device_lib

def get_available_gpus():
    local_device_protos = device_lib.list_local_devices()
    return [x.name for x in local_device_protos if x.device_type == 'GPU']
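
# Note: list_local_devices() reports names such as '/device:GPU:0'; on a
# CPU-only machine the comprehension above returns an empty list.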


ExistGPUs = get_available_gpus()
GPUFlag = len(ExistGPUs) > 0  # fall back to CPU when no GPU is visible

# modelMode: 1 = time, 2 = frequency, 3 = time-frequency (TF)
modelMode = 3

from DataGenerator import dataGenBig
dg = dataGenBig()
dg.TrainDataParamsInit()
# [aa, bb] = dg.TrainDataGenerator()  # change yield to return to debug the generator

# Load the model
#####################################################
################## Direct Regression #################
#####################################################
from Others import my_loss as customLoss
mode = 2

if modelMode == 1:
    from GenerateModels import GenerateBLSTMTime as GenerateBLSTM

    tag = 'TimeModel'
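
# The excerpt stops at the time-domain branch; the other modes presumably
# follow the same pattern with their own imports and tags (names below are
# hypothetical, not from the original source):
# elif modelMode == 2:
#     from GenerateModels import GenerateBLSTMFreq as GenerateBLSTM
#     tag = 'FreqModel'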