Example #1
0
def load_model(path=None, fresh_meta=None):
    """Load a pickled (model, meta, configs) bundle and restore global
    optimizer state (moments, variances).

    Args:
        path: model file path; defaults to config.model_path. A '.pk'
            suffix is appended when missing.
        fresh_meta: when truthy, discard saved optimizer state and start
            with empty moments/variances; defaults to config.fresh_meta.

    Returns:
        The loaded model, or None when pickle_load yields nothing.
    """
    if not path: path = config.model_path
    if not fresh_meta: fresh_meta = config.fresh_meta
    if path[-3:] != '.pk': path += '.pk'
    obj = pickle_load(path)
    if obj:
        model, meta, configs = obj
        if config.use_gpu:
            # NOTE(review): nn.Module.cuda() moves parameters in place, so
            # discarding the wrapper's return value presumably still moves
            # `model`'s tensors — confirm TorchModel registers them.
            TorchModel(model).cuda()
        global moments, variances
        if fresh_meta:
            moments, variances = [], []
        else:
            moments, variances = meta
            if config.use_gpu:
                moments = [[e2.cuda() for e2 in e1] for e1 in moments]
                variances = [[e2.cuda() for e2 in e1] for e1 in variances]
        # Reconcile saved config values against the current config.
        for k_saved, v_saved in configs:
            v = getattr(config, k_saved)
            if v != v_saved:
                # BUG FIX: the original tested `v == 'all_losses'`,
                # comparing the config *value* to the key name; the intent
                # is to skip restoring the 'all_losses' key on a fresh start.
                if k_saved == 'all_losses' and fresh_meta: continue
                print(
                    f'config conflict resolution: {k_saved} {v} -> {v_saved}')
                setattr(config, k_saved, v_saved)
        return model
Example #2
0
def load_data(frames=False):
    """Load pickled sequences as torch Tensors.

    frames=False: return each sequence as a (1, 1, -1) view.
    frames=True: return, per sequence, a list of overlapping windows of
    length config.frame_len advanced by config.frame_stride.
    Tensors are moved to the GPU when config.use_gpu is set.
    """
    # Truncate each sequence so the length beyond frame_len is an exact
    # multiple of frame_stride — every window sliced below is full-length.
    data = [
        Tensor(
            sequence[:config.frame_len + (len(sequence) - config.frame_len) //
                     config.frame_stride * config.frame_stride])
        for sequence in pickle_load(config.data_path + '.pk')
    ]

    if not frames:
        if config.use_gpu:
            data = [sequence.cuda() for sequence in data]
        return [e.view(1, 1, -1) for e in data]
    else:
        data = [d.view(1, -1, 1) for d in data]
        #hann_w = hann()
        # One window per stride position along dim 1; window count is
        # (len - frame_len) // frame_stride + 1 (exact after truncation).
        frames = [
            [
                sequence[:, i * config.frame_stride:i * config.frame_stride +
                         config.frame_len, :]  #*hann_w
                for i in range((sequence.size(1) - config.frame_len) //
                               config.frame_stride + 1)
            ] for sequence in data
        ]
        if config.use_gpu:
            frames = [[frame.cuda() for frame in seq] for seq in frames]
        return frames
def load_data(path=None):
    """Load pickled (sequence, time_signature) pairs and convert each
    timestep into a normalized torch tensor.

    Depending on config flags, polyphony is collapsed to a single active
    note and multi-octave vectors are folded onto a single octave.
    Mutates and returns the loaded data list, or None when loading fails.
    """
    if not path: path = config.data_path
    if path[-3:] != '.pk': path += '.pk'
    data = pickle_load(path)
    if data:
        for d_index, (sequence, time_sig) in enumerate(data):

            d = []

            for timestep in sequence:
                # NOTE(review): timestep appears to be a per-pitch
                # activation vector whose last element is a rest/extra
                # slot (code below treats [:-1] as pitches) — confirm.

                if not config.polyphony:

                    vec = empty_vector_multi_oct.copy()

                    if config.monophony_mode == 'l':
                        # keep only the lowest sounding pitch
                        for i, e in zip(range(len(timestep) - 1),
                                        timestep[:-1]):
                            if e > 0:
                                vec[i] = 1
                                break

                    elif config.monophony_mode == 'h':
                        # keep only the highest sounding pitch
                        for i, e in zip(reversed(range(len(timestep) - 1)),
                                        reversed(timestep[:-1])):
                            if e > 0:
                                vec[i] = 1
                                break

                    else:
                        # keep the loudest entry; NOTE(review): unlike the
                        # branches above this scans the FULL vector,
                        # including the trailing slot — confirm intended.
                        vec[timestep.index(max(timestep))] = 1

                    # nothing sounded: flag the trailing rest slot
                    if sum(vec) == 0: vec[-1] = 1

                    timestep = vec

                if not config.multi_octave:

                    # fold all octaves onto 12 pitch classes, preserving
                    # the trailing slot as-is
                    vec = empty_vector_single_oct.copy()
                    vec[-1] = timestep[-1]

                    for i, e in enumerate(timestep[:-1]):
                        if e > 0:
                            vec[i % 12] += e

                    timestep = vec

                timestep = tensor(normalize_vector(timestep), dtype=float32)

                # tanh activation ('t') wants inputs rescaled to [-1, 1]
                if config.act_fn == 't': timestep = timestep * 2 - 1

                d.append(timestep)

            data[d_index] = d

            # if input('Show stream? (y/n): ').lower() == 'y':
            #     convert_to_stream([''.join(f'{note_reverse_dict[i%12]}{int(i/12)+config.min_octave},' if i!=len(timestep)-1 else 'R,' for i, element in enumerate(timestep) if element>0)[:-1] for timestep in d]).show()

        return data
def load_data(with_meta=False):
    """Load pickled (sequence, meta) pairs as column tensors.

    Each sequence becomes a (-1, 1) Tensor, moved to the GPU when
    config.use_gpu is set.  With with_meta, [tensor, meta] pairs are
    returned instead of bare tensors.
    """
    from torch import Tensor
    loaded = pickle_load(config.data_path+'.pk')
    result = []
    for seq, meta in loaded:
        column = Tensor(seq).view(-1,1)
        if config.use_gpu:
            column = column.cuda()
        result.append([column,meta] if with_meta else column)
    return result
def load_data(with_meta=False):
    """Load pickled (data, meta) and return per-sequence Tensors.

    A '.pk' suffix is appended to config.data_path when missing.  With a
    tanh activation ('t'), values are rescaled from [0, 1] to [-1, 1].
    With with_meta, returns [tensors, meta] instead of just the tensors.
    """
    from torch import Tensor
    path = config.data_path
    if path[-3:] != '.pk':
        path = path + '.pk'
    data, meta = pickle_load(path)
    if config.act_fn == 't':
        data = [sequence * 2 - 1 for sequence in data]
    tensors = []
    for sequence in data:
        t = Tensor(sequence)
        if config.use_gpu:
            t = t.cuda(config.gpu_id)
        tensors.append(t)
    if with_meta:
        return [tensors, meta]
    return tensors
Example #6
0
def load_model(path=None, fresh_meta=None, py_serialize=True):
    """Load a pickled (model, meta) bundle and restore global optimizer state.

    With py_serialize, each layer (a namedtuple of plain arrays) is rebuilt
    field-by-field as grad-enabled tensors, and the saved moments/variances
    are re-wrapped as tensors unless fresh_meta resets them.

    Returns the model, or None when nothing could be loaded.
    """
    path = path or config.model_load_path
    fresh_meta = fresh_meta or config.fresh_meta
    loaded = pickle_load(path + '.pk')
    if not loaded:
        return
    model, meta = loaded
    if py_serialize:
        model = [
            type(layer)(*(tensor(getattr(layer, field), requires_grad=True)
                          for field in layer._fields))
            for layer in model
        ]
    global moments, variances
    if fresh_meta:
        moments, variances = [], []
    else:
        moments, variances = meta
        if py_serialize:
            moments = [[tensor(m) for m in group] for group in moments]
            variances = [[tensor(v) for v in group] for group in variances]
    return model
Example #7
0
def load_data(path=None):
    """Load pickled (sequence, time_signature) pairs and slice each into
    overlapping training windows of (input, label) tensor lists.

    Window size and slide are derived from the time signature and the
    config.hm_bars_grouped / hm_bars_slide / beat_resolution settings.
    Labels are the inputs shifted one timestep ahead.

    Returns the windowed data list, or None when pickle_load fails.
    """
    if not path: path = config.data_path
    data = pickle_load(path + '.pk')

    if not data:
        return

    for seq_index, (sequence, time_sig) in enumerate(data):

        windows = []

        window_size = int(config.hm_bars_grouped * time_sig *
                          config.beat_resolution)
        window_slide = int(config.hm_bars_slide * time_sig *
                           config.beat_resolution)

        hm_windows = ceil(len(sequence) / window_slide)

        for w in range(hm_windows):

            chunk = sequence[w * window_slide:w * window_slide + window_size]

            # inputs are every timestep but the last; labels are the same
            # timesteps shifted forward by one
            inp = [
                tensor(step, dtype=float32).view(1, config.in_size)
                for step in chunk[:-1]
            ]
            lbl = [
                tensor(step, dtype=float32).view(1, config.in_size)
                for step in chunk[1:]
            ]

            windows.append([inp, lbl])

        data[seq_index] = windows

    return data
Example #8
0
def main():
    """Preprocess new data and save it, appending to any existing data file."""
    existing = pickle_load(config.data_path + '.pk')
    if existing:
        print('> appending data to prev file')
    save_data(preprocess() + (existing if existing else []))