Code example #1
0
                    nade=args.nade,
                    conditioning=args.conditioning,
                    double_conditioning=args.double_conditioning,
                    num_layers=args.num_layers,
                    dropout=dropout,
                    input_dropout=input_dropout,
                    input_dropout_token=input_dropout_token,
                    lr=lr,
                    reduction_flag=reduction_flag,
                    gpu_ids=gpu_ids,
                    suffix=args.suffix,
                    mixup=mixup,
                    scheduled_training=scheduled_training)

# Restore the trained ("overfit") checkpoint, move the model to the chosen
# device, and switch to inference mode (disables dropout / batch-norm updates).
model.load_overfit(device=device)
model.to(device)
model = model.eval()

# Dir for writing generated files (created under the current working directory)
writing_dir = f'{os.getcwd()}/generation'
if not os.path.isdir(writing_dir):
    os.makedirs(writing_dir)

# Create the OSC-style server that answers generation requests with the model.
# NOTE(review): this variant takes separate in/out ports plus a client IP —
# presumably an older/newer OrchestraServer signature than the tuple-based
# one used elsewhere; confirm against the OrchestraServer definition.
server = OrchestraServer(args.in_port,
                         args.out_port,
                         args.ip,
                         args.ip_client,
                         model=model,
                         subdivision=subdivision,
                         writing_dir=writing_dir)
Code example #2
0
def main(args):
    """Load a trained Transformer orchestration model and serve it over the network.

    Builds the dataset/processors, instantiates the Transformer with
    inference-time settings (all regularisation disabled), restores the
    trained checkpoint, then blocks forever answering generation requests.

    :param args: parsed CLI namespace providing the dataset/model
        hyperparameters (``dataset_type``, ``subdivision``, ``num_heads``,
        ``per_head_dim``, ...) plus the server address fields ``ip`` and
        ``port``.
    :return: never returns normally — blocks in ``server.serve_forever()``.
    """
    # Inference-time settings: disable every training-only regulariser.
    dropout = 0.
    input_dropout = 0.
    input_dropout_token = 0.
    mixup = False
    scheduled_training = 0.
    group_instrument_per_section = False
    reduction_flag = False
    lr = 1.  # unused at inference time, but required by the Transformer constructor
    cpc_config_name = None
    subdivision = args.subdivision

    # Use all GPUs available; fall back to CPU when none are present.
    # (range() already yields ints — no per-element conversion needed.)
    gpu_ids = list(range(torch.cuda.device_count()))
    print(f'Using GPUs {gpu_ids}')
    device = 'cuda' if gpu_ids else 'cpu'

    # Get dataset and the encoder/decoder data processors.
    dataset_manager = DatasetManager()
    dataset, processor_decoder, processor_encoder, processor_encodencoder = \
        dataset_import.get_dataset(dataset_manager, args.dataset_type, args.subdivision, args.sequence_size,
                                   args.velocity_quantization, args.max_transposition,
                                   args.num_heads, args.per_head_dim, args.local_position_embedding_dim,
                                   args.block_attention,
                                   group_instrument_per_section, args.nade, cpc_config_name, args.double_conditioning,
                                   args.instrument_presence_in_encoder)

    # Build the model, restore the trained checkpoint, and switch to eval mode.
    model = Transformer(dataset=dataset,
                        data_processor_encodencoder=processor_encodencoder,
                        data_processor_encoder=processor_encoder,
                        data_processor_decoder=processor_decoder,
                        num_heads=args.num_heads,
                        per_head_dim=args.per_head_dim,
                        position_ff_dim=args.position_ff_dim,
                        enc_dec_conditioning=args.enc_dec_conditioning,
                        hierarchical_encoding=args.hierarchical,
                        block_attention=args.block_attention,
                        nade=args.nade,
                        conditioning=args.conditioning,
                        double_conditioning=args.double_conditioning,
                        num_layers=args.num_layers,
                        dropout=dropout,
                        input_dropout=input_dropout,
                        input_dropout_token=input_dropout_token,
                        lr=lr,
                        reduction_flag=reduction_flag,
                        gpu_ids=gpu_ids,
                        suffix=args.suffix,
                        mixup=mixup,
                        scheduled_training=scheduled_training)

    model.load_overfit(device=device)
    model.to(device)
    model = model.eval()

    # Dir for writing generated files; exist_ok avoids the isdir/makedirs race.
    writing_dir = f'{os.getcwd()}/generation'
    os.makedirs(writing_dir, exist_ok=True)

    # Create server and block forever answering generation requests.
    server_address = (args.ip, args.port)
    server = OrchestraServer(server_address, model, subdivision, writing_dir)
    print(f'[Server listening to {args.ip} on port {args.port}]')
    server.serve_forever()