import io

import torch
from ._utils import _type, _cuda
from typing import Any, Optional, TypeVar, Type

# Bound TypeVar so stub methods can declare "returns same storage type as self".
T = TypeVar('T', bound='_StorageBase')


class _StorageBase(object):
    """Shared Python-side interface for all torch storage classes.

    Concrete storage types (e.g. ``FloatStorage``) inherit from this; the
    methods declared as ``...`` stubs below are implemented in C++ and bound
    onto the concrete classes at import time.
    """
    _cdata: Any                 # opaque handle to the underlying C storage
    is_cuda: bool = False
    is_sparse: bool = False

    # --- implemented natively; signatures only (E704 silences one-line defs) ---
    def __init__(self, *args, **kwargs): ...  # noqa: E704
    def __len__(self) -> int: ...  # noqa: E704
    def __getitem__(self, idx): ...  # noqa: E704
    def copy_(self, source: T) -> T: ...  # noqa: E704
    def size(self) -> int: ...  # noqa: E704
    def type(self, dtype: Optional[str] = None, non_blocking: bool = False) -> T: ...  # noqa: E704
    def cuda(self, device=None, non_blocking=False, **kwargs) -> T: ...  # noqa: E704
    def element_size(self) -> int: ...  # noqa: E704
    def get_device(self) -> int: ...  # noqa: E704

    # Defined in torch/csrc/generic/StorageSharing.cpp
    def _share_filename_(self): ...  # noqa: E704
    def _share_fd_(self): ...  # noqa: E704

    @classmethod
    def _new_using_filename(cls: Type[T], size: int) -> T: ...  # noqa: E704

    @classmethod
    def _new_using_fd(cls: Type[T], size: int) -> T: ...  # noqa: E704

    def __str__(self):
        # One element per line, then a trailer naming the type and length.
        content = ' ' + '\n '.join(str(self[i]) for i in range(len(self)))
        return content + f'\n[{torch.typename(self)} of size {len(self)}]'

    def __repr__(self):
        return str(self)

    def __iter__(self):
        return iter(map(lambda i: self[i], range(self.size())))

    def __copy__(self):
        return self.clone()

    def __deepcopy__(self, memo):
        # Storages are deduplicated through a torch-specific sub-memo keyed by
        # the C data pointer, so aliasing tensors keep sharing after deepcopy.
        memo = memo.setdefault('torch', {})
        if self._cdata in memo:
            return memo[self._cdata]
        new_storage = self.clone()
        memo[self._cdata] = new_storage
        return new_storage

    def __reduce__(self):
        # Pickle via torch.save (legacy serialization format) into a bytes
        # payload; _load_from_bytes reverses this on unpickling.
        b = io.BytesIO()
        torch.save(self, b, _use_new_zipfile_serialization=False)
        return (_load_from_bytes, (b.getvalue(),))

    def __sizeof__(self):
        return super(_StorageBase, self).__sizeof__() + self.element_size() * self.size()

    def clone(self):
        """Returns a copy of this storage"""
        # -1 selects the current CUDA device (a no-op context for CPU storage).
        device = self.get_device() if self.is_cuda else -1
        with torch.cuda.device(device):
            return type(self)(self.size()).copy_(self)

    def tolist(self):
        """Returns a list containing the elements of this storage"""
        return list(self)

    def cpu(self):
        """Returns a CPU copy of this storage if it's not already on the CPU"""
        # e.g. torch.cuda.FloatStorage -> torch.FloatStorage (same class name).
        return self.type(getattr(torch, self.__class__.__name__))

    def double(self):
        """Casts this storage to double type"""
        return self.type(type(self).__module__ + '.DoubleStorage')

    def float(self):
        """Casts this storage to float type"""
        return self.type(type(self).__module__ + '.FloatStorage')

    def half(self):
        """Casts this storage to half type"""
        return self.type(type(self).__module__ + '.HalfStorage')

    def long(self):
        """Casts this storage to long type"""
        return self.type(type(self).__module__ + '.LongStorage')

    def int(self):
        """Casts this storage to int type"""
        return self.type(type(self).__module__ + '.IntStorage')

    def short(self):
        """Casts this storage to short type"""
        return self.type(type(self).__module__ + '.ShortStorage')

    def char(self):
        """Casts this storage to char type"""
        return self.type(type(self).__module__ + '.CharStorage')

    def byte(self):
        """Casts this storage to byte type"""
        return self.type(type(self).__module__ + '.ByteStorage')

    def bool(self):
        """Casts this storage to bool type"""
        return self.type(type(self).__module__ + '.BoolStorage')

    def bfloat16(self):
        """Casts this storage to bfloat16 type"""
        return self.type(type(self).__module__ + '.BFloat16Storage')

    def complex_double(self):
        """Casts this storage to complex double type"""
        return self.type(type(self).__module__ + '.ComplexDoubleStorage')

    def complex_float(self):
        """Casts this storage to complex float type"""
        return self.type(type(self).__module__ + '.ComplexFloatStorage')

    def pin_memory(self):
        """Copies the storage to pinned memory, if it's not already pinned."""
        if self.is_cuda:
            raise TypeError(f"cannot pin '{self.type()}' only CPU memory can be pinned")
        import torch.cuda
        allocator = torch.cuda._host_allocator()  # type: ignore[attr-defined]
        return type(self)(self.size(), allocator=allocator).copy_(self)

    def share_memory_(self):
        """Moves the storage to shared memory.

        This is a no-op for storages already in shared memory and for CUDA
        storages, which do not need to be moved for sharing across processes.
        Storages in shared memory cannot be resized.

        Returns: self
        """
        # Import deferred to avoid pulling in multiprocessing at module load.
        from torch.multiprocessing import get_sharing_strategy
        if self.is_cuda:
            pass  # CUDA doesn't use POSIX shared memory
        elif get_sharing_strategy() == 'file_system':
            self._share_filename_()
        else:
            self._share_fd_()
        return self

    @classmethod
    def _new_shared(cls, size):
        """Creates a new storage in shared memory with the same data type"""
        from torch.multiprocessing import get_sharing_strategy
        if cls.is_cuda:
            return cls(size)
        elif get_sharing_strategy() == 'file_system':
            return cls._new_using_filename(size)
        else:
            return cls._new_using_fd(size)


def _load_from_bytes(b):
    """Unpickle helper: reconstruct a storage from bytes written by __reduce__."""
    return torch.load(io.BytesIO(b))


# The real implementations of .type() and .cuda() live in torch._utils; bind
# them over the stubs declared in the class body above.
_StorageBase.type = _type  # type: ignore[assignment]
_StorageBase.cuda = _cuda  # type: ignore[assignment]
Docs
Access comprehensive developer documentation for PyTorch
To analyze traffic and optimize your experience, we serve cookies on this site. By clicking or navigating, you agree to allow our usage of cookies. As the current maintainers of this site, Facebook’s Cookies Policy applies. Learn more, including about available controls: Cookies Policy.