Python source code examples: torch.serialization

Example 1
import io

import torch


def restricted_loads(s):
    # RestrictedUnpickler and _check_hooks_are_valid are defined elsewhere
    # in the same module; the unpickler whitelists which globals may load.
    result = RestrictedUnpickler(io.BytesIO(s)).load()
    if torch.is_tensor(result) or isinstance(result, torch.nn.Module):
        _check_hooks_are_valid(result, "_backward_hooks")
    return result


# Adapt torch.load to use RestrictedUnpickler - patched for torch.storage._load_from_bytes
# (Adapted from https://github.com/pytorch/pytorch/blob/master/torch/serialization.py#L602-L773)
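
`restricted_loads` relies on a `RestrictedUnpickler` that is not shown. A minimal sketch of such a class, following the whitelisting pattern from the Python `pickle` documentation (the whitelist contents here are hypothetical):

import pickle

# Hypothetical whitelist; a real one would list exactly the (module, name)
# pairs the application needs to deserialize.
_SAFE_CLASSES = {
    ('builtins', 'dict'),
    ('builtins', 'list'),
    ('collections', 'OrderedDict'),
}


class RestrictedUnpickler(pickle.Unpickler):
    # Resolve only whitelisted globals; refuse everything else.
    def find_class(self, module, name):
        if (module, name) in _SAFE_CLASSES:
            return super().find_class(module, name)
        raise pickle.UnpicklingError(
            "global '{}.{}' is forbidden".format(module, name))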
Example 2
def _patch_model_io():
    if PatchPyTorchModelIO.__patched:
        return

    if 'torch' not in sys.modules:
        return

    PatchPyTorchModelIO.__patched = True

    # noinspection PyBroadException
    try:
        import torch
        torch.save = _patched_call(torch.save, PatchPyTorchModelIO._save)
        torch.load = _patched_call(torch.load, PatchPyTorchModelIO._load)

        # no need to worry about recursive calls, _patched_call takes care of that
        serialization = getattr(torch, 'serialization', None)
        if serialization is not None:
            if hasattr(serialization, '_save'):
                serialization._save = _patched_call(serialization._save, PatchPyTorchModelIO._save)
            if hasattr(serialization, '_load'):
                serialization._load = _patched_call(serialization._load, PatchPyTorchModelIO._load)
            if hasattr(serialization, '_legacy_save'):
                serialization._legacy_save = _patched_call(serialization._legacy_save, PatchPyTorchModelIO._save)
            if hasattr(serialization, '_legacy_load'):
                serialization._legacy_load = _patched_call(serialization._legacy_load, PatchPyTorchModelIO._load)
    except ImportError:
        pass
    except Exception:
        pass  # print('Failed patching pytorch')
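
The helper `_patched_call` is not shown either. Judging from the call sites, it wraps an original function so that a hook receives the original plus the call arguments, and it guards against re-entrancy (the comment about recursive calls). A sketch under those assumptions; the exact signature is a guess:

import functools


def _patched_call(original_fn, patched_fn):
    # Assumed contract: patched_fn(original_fn, *args, **kwargs) performs
    # its bookkeeping and ultimately calls original_fn itself.
    @functools.wraps(original_fn)
    def _inner(*args, **kwargs):
        # Re-entrancy guard: if the hook is already running on this wrapper,
        # fall through to the original function instead of hooking again.
        if getattr(_inner, '_in_patched_call', False):
            return original_fn(*args, **kwargs)
        _inner._in_patched_call = True
        try:
            return patched_fn(original_fn, *args, **kwargs)
        finally:
            _inner._in_patched_call = False

    return _inner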
Example 3
def build_model(self, args):
    model = super().build_model(args)
    if args.pretrained is not None:  # load pretrained weights
        if not os.path.exists(args.pretrained):
            raise ValueError(
                'Could not load pretrained weights from {}'.format(args.pretrained))
        from torch.serialization import default_restore_location
        # Remap every storage to CPU, whatever device it was saved from.
        saved_state = torch.load(
            args.pretrained,
            map_location=lambda s, l: default_restore_location(s, 'cpu')
        )
        self.adapt_state(saved_state['model'], model)

    return model
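
The `map_location` lambda receives each storage `s` and its serialized location tag `l`; `default_restore_location` then dispatches to PyTorch's registered restore handlers for the requested device. For the common CPU case, passing a plain string does the same job. A small self-contained illustration (the checkpoint path is hypothetical):

import torch

# Save a checkpoint and reload it onto the CPU; a checkpoint written from
# GPU tensors would load the same way on a CPU-only machine.
state = {'model': {'weight': torch.randn(3, 3)}}
torch.save(state, 'checkpoint.pt')  # hypothetical path

restored = torch.load('checkpoint.pt', map_location='cpu')
print(restored['model']['weight'].device)  # cpu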