1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171
| PyObject* THPVariable_pynew( PyTypeObject* type, PyObject* args, PyObject* kwargs) { HANDLE_TH_ERRORS TORCH_CHECK( type != &THPVariableType, "Cannot directly construct _TensorBase; subclass it and then construct that"); jit::tracer::warn("torch.Tensor", jit::tracer::WARN_CONSTRUCTOR); auto tensor = torch::utils::base_tensor_ctor(args, kwargs); return THPVariable_NewWithVar( type, std::move(tensor), c10::impl::PyInterpreterStatus::MAYBE_UNINITIALIZED, true); END_HANDLE_TH_ERRORS }
// Shared implementation behind the legacy `torch.Tensor(...)` /
// `Tensor.new(...)` constructors. Parses the Python arguments against the
// seven `new(...)` signatures below and dispatches on which one matched.
// (Excerpt: elided branches are marked `...`.)
Tensor legacy_tensor_generic_ctor_new(
    c10::DispatchKey dispatch_key,
    at::ScalarType scalar_type,
    PyObject* args,
    PyObject* kwargs,
    CtorOrNew ctor_or_new) {
  auto options = dispatchKeyToTensorOptions(dispatch_key);
  static PythonArgParser parser({
      "new(*, Device? device=None)",
      "new(Storage storage)",
      "new(*, int64_t cdata)|hidden",
      "new(Tensor other)",
      "new(Tensor other, *, Device? device=None)|hidden",
      "new(SymIntArrayRef size, *, Device? device=None)",
      "new(PyObject* data, *, Device? device=None)",
  });
...
  ParsedArgs<2> parsed_args;
  auto r = parser.parse(args, kwargs, parsed_args);
  // r.idx is the index of the signature (in declaration order above) that
  // matched the call.
  if (r.idx == 0) {         // new(*, Device? device=None)
    ...
  } else if (r.idx == 1) {  // new(Storage storage)
    ...
  } else if (r.idx == 2) {  // new(*, int64_t cdata) — hidden
    ...
  } else if (r.idx == 3) {  // new(Tensor other)
    ...
  } else if (r.idx == 4) {  // new(Tensor other, *, Device? device=None) — hidden
    ...
  } else if (r.idx == 5) {  // new(SymIntArrayRef size, *, Device? device=None)
    ...
  } else if (r.idx == 6) {  // new(PyObject* data, *, Device? device=None)
    auto deviceOptional = r.deviceOptional(1);
    ...
    // Treat `data` as a (possibly nested) Python sequence of values.
    return legacy_new_from_sequence(
        options, scalar_type, deviceOptional, r.pyobject(0));
  }
  // Unreachable when the parser succeeded; defensive fallthrough.
  throw std::runtime_error("new(): invalid arguments");
}
// Builds a tensor from a Python sequence for the legacy `new(...)` path.
// Non-sequence inputs are rejected with a Python-visible TypeError; valid
// inputs are handed to internal_new_from_data with copying and dtype
// inference disabled (the legacy ctors always use the requested scalar_type).
Tensor legacy_new_from_sequence(
    c10::TensorOptions options,
    at::ScalarType scalar_type,
    c10::optional<Device> device,
    PyObject* data) {
  if (PySequence_Check(data)) {
    return internal_new_from_data(
        options,
        scalar_type,
        device,
        data,
        /*copy_variables=*/false,
        /*copy_numpy=*/false,
        /*type_inference=*/false);
  }
  throw TypeError(
      "new(): data must be a sequence (got %s)", Py_TYPE(data)->tp_name);
}
// Core worker for constructing a tensor from arbitrary Python data.
// (Excerpt: elided portions are marked `...`.)
//
// options / scalar_type : requested tensor options and element type.
// device_opt            : explicit device; overrides options.device() when set.
// copy_variables / copy_numpy : copy behavior for tensor/ndarray inputs
//                         (handled in elided code).
// type_inference        : when true, infer the dtype from `data` instead of
//                         using scalar_type.
// pin_memory            : allocate the staging buffer in pinned memory.
Tensor internal_new_from_data(
    c10::TensorOptions options,
    at::ScalarType scalar_type,
    c10::optional<Device> device_opt,
    PyObject* data,
    bool copy_variables,
    bool copy_numpy,
    bool type_inference,
    bool pin_memory = false) {
  ...
  // An explicit device argument wins; otherwise use the device in `options`.
  auto device = device_opt.has_value() ? *device_opt : options.device();
  // Determine the result shape from the (nested) Python data.
  auto sizes = compute_sizes(data, scalar_type);
  // Either trust the caller's scalar_type or sniff the dtype from the data.
  ScalarType inferred_scalar_type =
      type_inference ? infer_scalar_type(data) : scalar_type;
  Tensor tensor;
  {
    ...
    TensorOptions opts =
        at::initialTensorOptions().dtype(inferred_scalar_type);
    // Meta tensors carry no storage to fill: return an empty tensor of the
    // right shape/dtype on the meta device, skipping the element-wise
    // store and the device transfer below.
    if (device == at::kMeta) {
      return at::empty(sizes, opts.device(device));
    }
    // Allocate a staging tensor (default initial device — presumably CPU;
    // the .to() below performs the real device placement) and copy the
    // Python values in element by element, unless the tensor has no elements.
    tensor = at::empty(sizes, opts.pinned_memory(pin_memory));
    if (c10::multiply_integers(tensor.sizes()) != 0) {
      recursive_store(
          (char*)tensor.data_ptr(),
          tensor.sizes(),
          tensor.strides(),
          0,
          inferred_scalar_type,
          tensor.dtype().itemsize(),
          data);
    }
  }
  // The remaining work no longer touches Python objects, so release the GIL
  // around the (potentially slow) device transfer.
  pybind11::gil_scoped_release no_gil;
  maybe_initialize_cuda(device);
  // Move/convert the staged tensor to the requested device and dtype.
  tensor = tensor.to(
      device,
      inferred_scalar_type,
      false,
      false);
  ...
  return at::lift_fresh(tensor);
}
// Convenience overload of at::empty taking concrete sizes plus a bundled
// TensorOptions; unpacks the options into the individual optional fields
// expected by the generated empty.memory_format operator.
inline at::Tensor empty(
    at::IntArrayRef size,
    at::TensorOptions options = {},
    c10::optional<at::MemoryFormat> memory_format = c10::nullopt) {
  // Promote the concrete sizes to SymInts for the symbolic-shape-aware op.
  auto sym_sizes = c10::fromIntArrayRefSlow(size);
  auto dtype = optTypeMetaToScalarType(options.dtype_opt());
  // Presumably reconciles a memory format set via `options` with the explicit
  // `memory_format` argument and yields the effective one.
  auto effective_memory_format =
      c10::impl::check_tensor_options_and_extract_memory_format(
          options, memory_format);
  return at::_ops::empty_memory_format::call(
      sym_sizes,
      dtype,
      options.layout_opt(),
      options.device_opt(),
      options.pinned_memory_opt(),
      effective_memory_format);
}
// Dispatcher entry point for the aten::empty.memory_format operator.
// The typed operator handle is created once (thread-safe function-local
// static) and reused for every subsequent call.
at::Tensor empty_memory_format::call(
    c10::SymIntArrayRef size,
    c10::optional<at::ScalarType> dtype,
    c10::optional<at::Layout> layout,
    c10::optional<at::Device> device,
    c10::optional<bool> pin_memory,
    c10::optional<at::MemoryFormat> memory_format) {
  static auto typed_handle = create_empty_memory_format_typed_handle();
  // Forward all arguments unchanged through the dispatcher.
  return typed_handle.call(
      size, dtype, layout, device, pin_memory, memory_format);
}
// Allocates a new Python object of `type` and binds `_var` to it as the
// owned C++ Variable. (Excerpt: elided portions are marked `...`.)
static PyObject* THPVariable_NewWithVar(
    PyTypeObject* type,
    Variable _var,
    c10::impl::PyInterpreterStatus status,
    bool allow_preexisting_pyobj) {
...
  PyObject* obj = type->tp_alloc(type, 0);
  if (obj) {
    auto v = (THPVariable*)obj;
    // Placement-new the cdata field so the raw tp_alloc storage holds a
    // properly constructed MaybeOwned before it is assigned to.
    new (&v->cdata) MaybeOwned<Variable>();
    ...
    // The new Python object takes ownership of the Variable.
    v->cdata = MaybeOwned<Variable>::owned(std::move(_var));
    ...
  }
  // obj may be nullptr if tp_alloc failed; caller handles the Python error.
  return obj;
}
|