Python source examples: torch.onnx
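torch.onnx is PyTorch's built-in exporter: torch.onnx.export(model, args, f) traces model with the example inputs args and serializes the resulting graph to f as an ONNX protobuf. The examples below come from real projects; as a baseline, here is a minimal self-contained sketch (torchvision's resnet18 is only a stand-in model):

import torch
import torchvision

# Minimal export sketch: trace a stand-in model with a dummy input.
model = torchvision.models.resnet18(pretrained=False).eval()
dummy_input = torch.randn(1, 3, 224, 224)  # tracing fixes these shapes in the graph
torch.onnx.export(model, dummy_input, "resnet18.onnx")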

Example 1
def main(args):
    dataset = load_config(args.dataset)

    num_classes = len(dataset["common"]["classes"])
    net = UNet(num_classes)

    def map_location(storage, _):
        return storage.cpu()

    chkpt = torch.load(args.checkpoint, map_location=map_location)
    net = torch.nn.DataParallel(net)
    net.load_state_dict(chkpt["state_dict"])

    # TODO: make input channels configurable instead of hard-coding three channels for RGB
    batch = torch.autograd.Variable(torch.randn(1, 3, args.image_size, args.image_size))

    torch.onnx.export(net, batch, args.model) 
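
A note on Example 1: the state-dict keys carry a module. prefix, presumably because the checkpoint was saved from a DataParallel model, so wrapping net in DataParallel before load_state_dict makes the keys match. A hedged alternative is to strip the prefix and export the bare UNet:

# Alternative sketch: strip the DataParallel "module." prefix from the keys.
state_dict = {k.replace("module.", "", 1): v for k, v in chkpt["state_dict"].items()}
net = UNet(num_classes)
net.load_state_dict(state_dict)
torch.onnx.export(net, batch, args.model)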
Example 2
def onnx_inference(args):
    # Load the ONNX model
    model = onnx.load("models/deepspeech_{}.onnx".format(args.continue_from))

    # Check that the IR is well formed
    onnx.checker.check_model(model)

    # Print a human-readable representation of the graph
    print(onnx.helper.printable_graph(model.graph))

    print("model checked, preparing backend!")
    # 'backend' is the Caffe2 ONNX backend module, imported elsewhere in the script
    rep = backend.prepare(model, device="CPU")  # or "CUDA:0"

    print("running inference!")

    # Hard coded input dim
    inputs = np.random.randn(16, 1, 161, 129).astype(np.float32)

    start = time.time()
    outputs = rep.run(inputs)
    print("time used: {}".format(time.time() - start))
    # To run networks with more than one input, pass a tuple
    # rather than a single numpy ndarray.
    print(outputs[0]) 
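
To make the closing comment concrete: a multi-input graph takes a tuple of arrays. A hedged sketch (the second input is invented for illustration):

# Hypothetical two-input network: pass a tuple, not a single ndarray.
audio = np.random.randn(16, 1, 161, 129).astype(np.float32)
lengths = np.random.randn(16, 1).astype(np.float32)
outputs = rep.run((audio, lengths))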
Example 3
def stylize_onnx_caffe2(content_image, args):
    """
    Read ONNX model and run it using Caffe2
    """

    assert not args.export_onnx

    import onnx
    import onnx_caffe2.backend

    model = onnx.load(args.model)

    prepared_backend = onnx_caffe2.backend.prepare(model, device='CUDA' if args.cuda else 'CPU')
    inp = {model.graph.input[0].name: content_image.numpy()}
    c2_out = prepared_backend.run(inp)[0]

    return torch.from_numpy(c2_out) 
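
onnx_caffe2 has long been superseded; for reference, a sketch of the same step with ONNX Runtime, assuming the onnxruntime package is available:

import onnxruntime as ort

# Equivalent inference through ONNX Runtime instead of the Caffe2 backend.
sess = ort.InferenceSession(args.model)
input_name = sess.get_inputs()[0].name
c2_out = sess.run(None, {input_name: content_image.numpy()})[0]
output = torch.from_numpy(c2_out)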
Example 4
def pytorch2onnx(args):
    # PyTorch exports to ONNX without the need for an external converter
    import torch
    from torch.autograd import Variable
    import torch.onnx
    import torchvision
    # Create input with the correct dimensions of the input of your model
    if args.model_input_shapes is None:
        raise ValueError("Please provide --model_input_shapes to convert PyTorch models.")
    dummy_model_input = []
    if len(args.model_input_shapes) == 1:
        dummy_model_input = Variable(torch.randn(*args.model_input_shapes))
    else:
        for shape in args.model_input_shapes:
            dummy_model_input.append(Variable(torch.randn(*shape)))
        # torch.onnx.export expects a tuple of example inputs, not a list
        dummy_model_input = tuple(dummy_model_input)

    # load the PyTorch model
    model = torch.load(args.model, map_location="cpu")

    # export the PyTorch model as an ONNX protobuf
    torch.onnx.export(model, dummy_model_input, args.output_onnx_path) 
Example 5
def convert_models(args):
    # Quick format check
    model_extension = get_extension(args.model)
    if (args.model_type == "onnx" or model_extension == "onnx"):
        print("Input model is already ONNX model. Skipping conversion.")
        if args.model != args.output_onnx_path:
            copyfile(args.model, args.output_onnx_path)
        return

    if converters.get(args.model_type) is None:
        raise ValueError('Model type {} is not currently supported. \n\
            Please select one of the following model types -\n\
                cntk, coreml, keras, pytorch, scikit-learn, tensorflow'.format(args.model_type))

    suffix = suffix_format_map.get(model_extension)

    if suffix is not None and suffix != args.model_type:
        raise ValueError('Model with extension {} does not come from {}'.format(model_extension, args.model_type))

    # Find the corresponding converter for current model
    converter = converters.get(args.model_type)
    # Run converter
    converter(args) 
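
converters and suffix_format_map are defined elsewhere in that script. A purely illustrative sketch of their shape (every name except pytorch2onnx, shown in Example 4, is hypothetical):

# Hypothetical registries backing convert_models.
converters = {
    "pytorch": pytorch2onnx,
    # "keras": convert_keras,  # hypothetical entries for the other formats
}
suffix_format_map = {
    "pth": "pytorch",
    "h5": "keras",
    "pb": "tensorflow",
}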
Example 6
def test_dcgan(self):
        # dcgan is flaky on some seeds, see:
        # https://github.com/ProjectToffee/onnx/pull/70
        torch.manual_seed(1)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(1)

        netD = dcgan._netD(1)
        netD.apply(dcgan.weights_init)
        input = Variable(torch.randn(BATCH_SIZE, 3, dcgan.imgsz, dcgan.imgsz))
        self.run_model_test(netD, train=False, batch_size=BATCH_SIZE,
                            input=input)

        netG = dcgan._netG(1)
        netG.apply(dcgan.weights_init)
        state_dict = model_zoo.load_url(model_urls['dcgan_b'], progress=False)
        # state_dict = model_zoo.load_url(model_urls['dcgan_f'], progress=False)
        noise = Variable(
            torch.randn(BATCH_SIZE, dcgan.nz, 1, 1).normal_(0, 1))
        self.run_model_test(netG, train=False, batch_size=BATCH_SIZE,
                            input=noise, state_dict=state_dict, rtol=1e-2, atol=1e-6) 
Example 7
def test_symbolic_override_nested(self):
        def symb(g, x, y):
            assert isinstance(x, torch._C.Value)
            assert isinstance(y[0], torch._C.Value)
            assert isinstance(y[1], torch._C.Value)
            return g.op('Sum', x, y[0], y[1]), (
                g.op('Neg', x), g.op('Neg', y[0]))

        @torch.onnx.symbolic_override_first_arg_based(symb)
        def foo(x, y):
            return x + y[0] + y[1], (-x, -y[0])

        class BigModule(torch.nn.Module):
            def forward(self, x, y):
                return foo(x, y)

        inp = (Variable(torch.FloatTensor([1])),
               (Variable(torch.FloatTensor([2])),
                Variable(torch.FloatTensor([3]))))
        BigModule()(*inp)
        self.assertONNX(BigModule(), inp) 
Example 8
def predict():
    list_of_files = glob.glob('output/models/*')  # '*' matches every file; use a pattern such as '*.csv' for a specific format
    model_path = max(list_of_files, key=os.path.getctime)

    print("Generating ONNX from model:", model_path)
    model = torch.load(model_path)

    input_sequences = [
        "SRSLVISTINQISEDSKEFYFTLDNGKTMFPSNSQAWGGEKFENGQRAFVIFNELEQPVNGYDYNIQVRDITKVLTKEIVTMDDEE" \
        "NTEEKIGDDKINATYMWISKDKKYLTIEFQYYSTHSEDKKHFLNLVINNKDNTDDEYINLEFRHNSERDSPDHLGEGYVSFKLDKI" \
        "EEQIEGKKGLNIRVRTLYDGIKNYKVQFP"]

    input_sequences_encoded = list(torch.IntTensor(encode_primary_string(aa))
                                   for aa in input_sequences)

    print("Exporting to ONNX...")

    output_path = "./tests/output/openprotein.onnx"
    onnx_from_model(model, input_sequences_encoded, output_path)

    print("Wrote ONNX to", output_path) 
Example 9
def torch2tvm_module(torch_module: torch.nn.Module, torch_inputs: Tuple[torch.Tensor, ...], target):
    torch_module.eval()
    input_names = []
    input_shapes = {}
    with torch.no_grad():
        for index, torch_input in enumerate(torch_inputs):
            name = "i" + str(index)
            input_names.append(name)
            input_shapes[name] = torch_input.shape
        buffer = io.BytesIO()
        torch.onnx.export(torch_module, torch_inputs, buffer, input_names=input_names, output_names=["o" + str(i) for i in range(len(torch_inputs))])
        outs = torch_module(*torch_inputs)
        buffer.seek(0, 0)
        onnx_model = onnx.load_model(buffer)
        relay_module, params = tvm.relay.frontend.from_onnx(onnx_model, shape=input_shapes)
    with tvm.relay.build_config(opt_level=3):
        graph, tvm_module, params = tvm.relay.build(relay_module, target, params=params)
    return graph, tvm_module, params 
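
A hedged usage sketch for the returned triple, using the graph_runtime API of the TVM versions this code targets (torch_module and torch_inputs are placeholders):

import tvm
from tvm.contrib import graph_runtime

# Compile for CPU and run the first input through the module.
graph, tvm_module, params = torch2tvm_module(torch_module, torch_inputs, target="llvm")
runtime = graph_runtime.create(graph, tvm_module, tvm.cpu())
runtime.set_input(**params)
runtime.set_input("i0", tvm.nd.array(torch_inputs[0].numpy()))
runtime.run()
result = runtime.get_output(0).asnumpy()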
Example 10
def convert_to_onnx(model, input_shape, output_file, input_names, output_names):
    """Convert PyTorch model to ONNX and check the resulting onnx model"""

    output_file.parent.mkdir(parents=True, exist_ok=True)
    model.eval()
    dummy_input = torch.randn(input_shape)
    model(dummy_input)
    torch.onnx.export(model, dummy_input, str(output_file), verbose=False,
                      input_names=input_names.split(','), output_names=output_names.split(','))

    # Model check after conversion
    model = onnx.load(str(output_file))
    try:
        onnx.checker.check_model(model)
        print('ONNX check passed successfully.')
    except onnx.onnx_cpp2py_export.checker.ValidationError as exc:
        sys.exit('ONNX check failed with error: ' + str(exc)) 
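
A hedged call sketch (paths and tensor names are invented; output_file must be a pathlib.Path, since the function calls output_file.parent.mkdir):

from pathlib import Path

# Illustrative invocation; "data" and "prob" are made-up tensor names.
convert_to_onnx(model, input_shape=(1, 3, 224, 224),
                output_file=Path("export/model.onnx"),
                input_names="data", output_names="prob")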
Example 11
def export_onnx_model(model, inputs, passes):
    """Trace and export a model to onnx format. Modified from
    https://github.com/facebookresearch/detectron2/

    Args:
        model (nn.Module):
        inputs (tuple[args]): the model will be called by `model(*inputs)`
        passes (None or list[str]): the optimization passes for the ONNX model

    Returns:
        an onnx model
    """
    assert isinstance(model, torch.nn.Module)

    # make sure all modules are in eval mode, onnx may change the training
    # state of the module if the states are not consistent
    def _check_eval(module):
        assert not module.training

    model.apply(_check_eval)

    # Export the model to ONNX
    with torch.no_grad():
        with io.BytesIO() as f:
            torch.onnx.export(
                model,
                inputs,
                f,
                operator_export_type=OperatorExportTypes.ONNX_ATEN_FALLBACK,
                # verbose=True,  # NOTE: uncomment this for debugging
                # export_params=True,
            )
            onnx_model = onnx.load_from_string(f.getvalue())

    # Apply ONNX's optimization passes, if any were requested
    if passes is not None:
        all_passes = optimizer.get_available_passes()
        assert all(p in all_passes for p in passes), \
            f'Only {all_passes} are supported'
        onnx_model = optimizer.optimize(onnx_model, passes)
    return onnx_model
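
A usage sketch mirroring Example 22 below, which hard-codes the same pass:

# Fuse batch-norm parameters into the preceding convolutions during export.
onnx_model = export_onnx_model(model, (dummy_input,), passes=["fuse_bn_into_conv"])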
Example 12
def export_onnx(path, batch_size, seq_len):
    print('The model is also exported in ONNX format at {}'.
          format(os.path.realpath(args.onnx_export)))
    model.eval()
    dummy_input = torch.LongTensor(seq_len * batch_size).zero_().view(-1, batch_size).to(device)
    hidden = model.init_hidden(batch_size)
    torch.onnx.export(model, (dummy_input, hidden), path)
Example 13
def export(dir):
    dummy_input = Variable(torch.randn(1, 3, 4, 4))
    model = broadcast_mul()
    model.eval()
    torch.save(model.state_dict(),os.path.join(dir,"broadcast_mul.pth"))
    onnx.export(model, dummy_input,os.path.join(dir,"broadcast_mul.onnx"), verbose=True) 
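
Examples 13 through 17 repeat this pattern for different architectures; onnx here is presumably torch.onnx, given the export signature. A hedged sanity check on the written file, in the spirit of Example 10:

import onnx as onnx_proto  # the ONNX package itself, distinct from torch.onnx

exported = onnx_proto.load(os.path.join(dir, "broadcast_mul.onnx"))
onnx_proto.checker.check_model(exported)  # raises if the graph is ill-formed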
Example 14
def export(dir):
    file_path = os.path.realpath(__file__)
    file_dir = os.path.dirname(file_path)
    dummy_input = Variable(torch.randn(1, 3, 32, 32))
    model = ResNet34()
    # model = load_network(model,os.path.join(file_dir,'..','model','pose_v02.pth'))
    model.eval()
    torch.save(model.state_dict(),os.path.join(dir,"resnet.pth"))
    onnx.export(model, dummy_input,os.path.join(dir,"resnet.onnx"), verbose=True) 
Example 15
def export(dir):
    file_path = os.path.realpath(__file__)
    file_dir = os.path.dirname(file_path)
    dummy_input = Variable(torch.randn(1, 3, 32, 32))
    model = GoogLeNet()
    # model = load_network(model,os.path.join(file_dir,'..','model','pose_v02.pth'))
    model.eval()
    torch.save(model.state_dict(),os.path.join(dir,"googlenet.pth"))
    onnx.export(model, dummy_input,os.path.join(dir,"googlenet.onnx"), verbose=True) 
Example 16
def export(dir):
    dummy_input = Variable(torch.randn(1, 3, 224, 224))
    model = MobileNetV2()
    model.eval()
    torch.save(model.state_dict(),os.path.join(dir,"MobileNetV2.pth"))
    onnx.export(model, dummy_input,os.path.join(dir,"MobileNetV2.onnx"), verbose=True) 
Example 17
def export(dir):
    file_path = os.path.realpath(__file__)
    file_dir = os.path.dirname(file_path)
    dummy_input = Variable(torch.randn(1, 3, 224, 224))
    model = AlexNet()
    # model = load_network(model,os.path.join(file_dir,'..','model','pose_v02.pth'))
    model.eval()
    torch.save(model.state_dict(),os.path.join(dir,"alexnet.pth"))
    onnx.export(model, dummy_input,os.path.join(dir,"alexnet.onnx"), verbose=True) 
Example 18
def gen_to_onnx(self, path, input_size, label=None, input_type=torch.FloatTensor, **kwargs):
        input = torch.randn(input_size).type(input_type).unsqueeze(0)
        if label is None:
            inputs = Variable(input.cuda() if self.cuda else input)
        else:
            label = torch.randn(1, 1).fill_(label)
            inputs = [Variable(input.cuda() if self.cuda else input), label.cuda() if self.cuda else label]

        return onnx._export(self.gen_model, inputs, f=path, **kwargs) 
Example 19
def disc_to_onnx(self, path, input_size, label=None, input_type=torch.FloatTensor, **kwargs):
        input = torch.randn(input_size).type(input_type).unsqueeze(0)
        if label is None:
            inputs = Variable(input.cuda() if self.cuda else input)
        else:
            label = torch.randn(1, 1).fill_(label)
            inputs = [Variable(input.cuda() if self.cuda else input), label.cuda() if self.cuda else label]

        return onnx._export(self.disc_model, inputs, f=path, **kwargs) 
Example 20
def to_onnx(self, input_sizes, path, input_types=torch.FloatTensor, **kwargs):
        if isinstance(input_sizes, list):
            # Materialize a tuple of dummy inputs; torch.onnx expects a tuple
            # of example inputs, not a generator
            inputs = tuple(torch.randn(input_size).type(input_type).unsqueeze(0)
                           for input_size, input_type in zip(input_sizes, input_types))
            inputs = tuple(input.cuda() if self.cuda else input for input in inputs)
        else:
            inputs = torch.randn(input_sizes).type(input_types).unsqueeze(0)
            inputs = inputs.cuda() if self.cuda else inputs

        return onnx._export(self.model, inputs, f=path, **kwargs)
Example 21
def stylize(args):
    device = torch.device("cuda" if args.cuda else "cpu")

    content_image = utils.load_image(args.content_image, scale=args.content_scale)
    content_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Lambda(lambda x: x.mul(255))
    ])
    content_image = content_transform(content_image)
    content_image = content_image.unsqueeze(0).to(device)

    if args.model.endswith(".onnx"):
        output = stylize_onnx_caffe2(content_image, args)
    else:
        with torch.no_grad():
            style_model = TransformerNet()
            state_dict = torch.load(args.model)
            # remove saved deprecated running_* keys in InstanceNorm from the checkpoint
            for k in list(state_dict.keys()):
                if re.search(r'in\d+\.running_(mean|var)$', k):
                    del state_dict[k]
            style_model.load_state_dict(state_dict)
            style_model.to(device)
            if args.export_onnx:
                assert args.export_onnx.endswith(".onnx"), "Export model file should end with .onnx"
                output = torch.onnx._export(style_model, content_image, args.export_onnx).cpu()
            else:
                output = style_model(content_image).cpu()
    utils.save_image(args.output_image, output[0]) 
Example 22
def export_onnx_model(model, inputs):
    """
    Trace and export a model to onnx format.

    Args:
        model (nn.Module):
        inputs (tuple[args]): the model will be called by `model(*inputs)`

    Returns:
        an onnx model
    """
    assert isinstance(model, torch.nn.Module)

    # make sure all modules are in eval mode, onnx may change the training state
    # of the module if the states are not consistent
    def _check_eval(module):
        assert not module.training

    model.apply(_check_eval)

    # Export the model to ONNX
    with torch.no_grad():
        with io.BytesIO() as f:
            torch.onnx.export(
                model,
                inputs,
                f,
                operator_export_type=OperatorExportTypes.ONNX_ATEN_FALLBACK,
                # verbose=True,  # NOTE: uncomment this for debugging
                # export_params=True,
            )
            onnx_model = onnx.load_from_string(f.getvalue())

    # Apply ONNX's Optimization
    all_passes = onnx.optimizer.get_available_passes()
    passes = ["fuse_bn_into_conv"]
    assert all(p in all_passes for p in passes)
    onnx_model = onnx.optimizer.optimize(onnx_model, passes)
    return onnx_model 
Example 23
def equalAndThen(self, x, y, msg, k):
        """
        Helper for implementing 'requireEqual' and 'checkEqual'.  Upon failure,
        invokes continuation 'k' with the error message.
        """
        if isinstance(x, onnx.TensorProto) and isinstance(y, onnx.TensorProto):
            self.equalAndThen(x.name, y.name, msg, k)
            # Use numpy for the comparison
            t1 = onnx.numpy_helper.to_array(x)
            t2 = onnx.numpy_helper.to_array(y)
            new_msg = "{}In embedded parameter '{}'".format(colonize(msg), x.name)
            self.equalAndThen(t1, t2, new_msg, k)
        elif isinstance(x, np.ndarray) and isinstance(y, np.ndarray):
            try:
                np.testing.assert_equal(x, y)
            except AssertionError as e:
                # Hand the failure to the continuation instead of re-raising,
                # as the docstring promises
                k("{}{}".format(colonize(msg, ": "), str(e).lstrip()))
        else:
            if x != y:
                # TODO: Better algorithm for lists
                sx = str(x)
                sy = str(y)
                if len(sx) > 40 or len(sy) > 40 or '\n' in sx or '\n' in sy:
                    # long form
                    l = "=" * 50
                    k("\n{}The value\n{}\n{}\n{}\n\ndoes not equal\n\n{}\n{}\n{}"
                        .format(colonize(msg, ":\n"), l, sx, l, l, sy, l))
                else:
                    k("{}{} != {}".format(colonize(msg), sx, sy))