[xdoctest][task 355] reformat example code with google style in `pir.cc` (#58177)

* 1/3

* 2/3

* new_ir => pir
DrRyanHuang authored Nov 2, 2023
1 parent a827e97 commit 53e5a2d
Showing 1 changed file with 97 additions and 49 deletions.
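
The target style can be seen in the diff below: plain statements under `.. code-block:: python` become doctest-style examples, with each statement prefixed by `>>> ` (or `... ` for continuation lines) and its expected output written directly beneath it, so a doctest runner such as xdoctest can execute and check the docstring. A minimal, hypothetical sketch of that convention, using only the standard-library doctest module (the `add` function and its values are invented for illustration and are not Paddle code):

    import doctest

    def add(x, y):
        """Add two numbers.

        Examples:
            .. code-block:: python

                >>> add(1, 2)
                3
                >>> total = 0
                >>> for v in [1, 2, 3]:
                ...     total = add(total, v)
                >>> total
                6
        """
        return x + y

    if __name__ == "__main__":
        # Collect the ">>>" examples from docstrings in this module and run them.
        doctest.testmod(verbose=True)
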
146 changes: 97 additions & 49 deletions paddle/fluid/pybind/pir.cc
@@ -143,20 +143,41 @@ void BindProgram(py::module *m) {
 Examples:
     .. code-block:: python
-        import paddle
-        import paddle.static as static
-        paddle.enable_static()
+        >>> import paddle
+        >>> import paddle.static as static
+        >>> paddle.enable_static()
+        >>> main_program = static.Program()
+        >>> startup_program = static.Program()
+        >>> with static.program_guard(main_program=main_program, startup_program=startup_program):
+        ...     x = static.data(name="x", shape=[-1, 784], dtype='float32')
+        ...     y = static.data(name="y", shape=[-1, 1], dtype='int32')
+        ...     z = static.nn.fc(name="fc", x=x, size=10, activation="relu")
+        >>> print("main program is: {}".format(main_program))
+        main program is: { // block 0
+            var x : LOD_TENSOR.shape(-1, 784).dtype(float32).stop_gradient(True)
+            var y : LOD_TENSOR.shape(-1, 1).dtype(int32).stop_gradient(True)
+            persist trainable param fc.w_0 : LOD_TENSOR.shape(784, 10).dtype(float32).stop_gradient(False)
+            var fc.tmp_0 : LOD_TENSOR.shape(-1, 10).dtype(float32).stop_gradient(False)
+            persist trainable param fc.b_0 : LOD_TENSOR.shape(10,).dtype(float32).stop_gradient(False)
+            var fc.tmp_1 : LOD_TENSOR.shape(-1, 10).dtype(float32).stop_gradient(False)
+            var fc.tmp_2 : LOD_TENSOR.shape(-1, 10).dtype(float32).stop_gradient(False)
+            {Out=['fc.tmp_0']} = mul(inputs={X=['x'], Y=['fc.w_0']}, force_fp32_output = False, op_device = , op_namescope = /, op_role = 0, op_role_var = [], scale_out = 1.0, scale_x = 1.0, scale_y = [1.0], use_mkldnn = False, with_quant_attr = False, x_num_col_dims = 1, y_num_col_dims = 1)
+            {Out=['fc.tmp_1']} = elementwise_add(inputs={X=['fc.tmp_0'], Y=['fc.b_0']}, Scale_out = 1.0, Scale_x = 1.0, Scale_y = 1.0, axis = 1, mkldnn_data_type = float32, op_device = , op_namescope = /, op_role = 0, op_role_var = [], use_mkldnn = False, use_quantizer = False, with_quant_attr = False, x_data_format = , y_data_format = )
+            {Out=['fc.tmp_2']} = relu(inputs={X=['fc.tmp_1']}, op_device = , op_namescope = /, op_role = 0, op_role_var = [], use_cudnn = False, use_mkldnn = False, with_quant_attr = False)
+        }
-        main_program = static.Program()
-        startup_program = static.Program()
-        with static.program_guard(main_program=main_program, startup_program=startup_program):
-            x = static.data(name="x", shape=[-1, 784], dtype='float32')
-            y = static.data(name="y", shape=[-1, 1], dtype='int32')
-            z = static.nn.fc(name="fc", x=x, size=10, activation="relu")
+        >>> print("start up program is: {}".format(startup_program))
+        start up program is: { // block 0
+            persist trainable param fc.w_0 : LOD_TENSOR.shape(784, 10).dtype(float32).stop_gradient(False)
+            persist trainable param fc.b_0 : LOD_TENSOR.shape(10,).dtype(float32).stop_gradient(False)
-        print("main program is: {}".format(main_program))
-        print("start up program is: {}".format(startup_program))
+            {Out=['fc.w_0']} = uniform_random(inputs={ShapeTensor=[], ShapeTensorList=[]}, diag_num = 0, diag_step = 0, diag_val = 1.0, dtype = 5, max = 0.08692913502454758, min = -0.08692913502454758, op_device = , op_namescope = /, op_role = 0, op_role_var = [], seed = 0, shape = [784, 10], with_quant_attr = False)
+            {Out=['fc.b_0']} = fill_constant(inputs={}, dtype = 5, force_cpu = False, op_device = , op_namescope = /, op_role = 0, op_role_var = [], place_type = -1, shape = [10], str_value = 0.0, use_mkldnn = False, value = 0.0, with_quant_attr = False)
+        }
 )DOC");
 program
     .def("__init__",
@@ -1206,24 +1227,37 @@ void BindUtils(pybind11::module *m) {
 Examples:
     .. code-block:: python
-        import paddle
-        from paddle import pir
-        paddle.enable_static()
-        x = paddle.randn([4, 4])
-        main_program, start_program = (
-            paddle.static.Program(),
-            paddle.static.Program(),
-        )
-        with paddle.static.program_guard(main_program, start_program):
-            x_s = paddle.static.data('x', [4, 4], x.dtype)
-            x_s.stop_gradient = False
-            y_s = paddle.matmul(x_s, x_s)
-            z_s = paddle.add(y_s, y_s)
-            k_s = paddle.tanh(z_s)
-        newir_program = pir.translate_to_new_ir(main_program.desc)
-        print(newir_program)
+        >>> import os
+        >>> # Paddle will remove this flag in the next version
+        >>> pir_flag = 'FLAGS_enable_new_ir_in_executor'
+        >>> os.environ[pir_flag] = 'True'
+        >>> import paddle
+        >>> from paddle import pir
+        >>> paddle.enable_static()
+        >>> x = paddle.randn([4, 4])
+        >>> main_program, start_program = (
+        ...     paddle.static.Program(),
+        ...     paddle.static.Program(),
+        ... )
+        >>> with paddle.static.program_guard(main_program, start_program):
+        ...     x_s = paddle.static.data('x', [4, 4], x.dtype)
+        ...     x_s.stop_gradient = False
+        ...     y_s = paddle.matmul(x_s, x_s)
+        ...     z_s = paddle.add(y_s, y_s)
+        ...     k_s = paddle.tanh(z_s)
+        >>> newir_program = pir.translate_to_new_ir(main_program.desc)
+        >>> print(newir_program)
+        {
+            (%0) = "pd_op.data" () {dtype:(pd_op.DataType)float32,is_persisable:[false],name:"x",place:(pd_op.Place)Place(undefined:0),shape:(pd_op.IntArray)[4,4],stop_gradient:[false]} : () -> pd_op.tensor<4x4xf32>
+            (%1) = "pd_op.matmul" (%0, %0) {is_persisable:[false],stop_gradient:[false],transpose_x:false,transpose_y:false} : (pd_op.tensor<4x4xf32>, pd_op.tensor<4x4xf32>) -> pd_op.tensor<4x4xf32>
+            (%2) = "pd_op.add" (%1, %1) {is_persisable:[false],stop_gradient:[false]} : (pd_op.tensor<4x4xf32>, pd_op.tensor<4x4xf32>) -> pd_op.tensor<4x4xf32>
+            (%3) = "pd_op.tanh" (%2) {is_persisable:[false],stop_gradient:[false]} : (pd_op.tensor<4x4xf32>) -> pd_op.tensor<4x4xf32>
+        }
 )DOC");
 m->def(
@@ -1268,25 +1302,39 @@ void BindUtils(pybind11::module *m) {
 Examples:
     .. code-block:: python
-        import paddle
-        from paddle import pir
-        paddle.enable_static()
-        x = paddle.randn([4, 4])
-        main_program, start_program = (
-            paddle.static.Program(),
-            paddle.static.Program(),
-        )
-        with paddle.static.program_guard(main_program, start_program):
-            x_s = paddle.static.data('x', [4, 4], x.dtype)
-            x_s.stop_gradient = False
-            y_s = paddle.matmul(x_s, x_s)
-            z_s = paddle.add(y_s, y_s)
-            k_s = paddle.tanh(z_s)
-        newir_program, mappings = pir.translate_to_new_ir_with_param_map(main_program.desc)
-        print(newir_program)
-        print(mappings)
+        >>> import os
+        >>> # Paddle will remove this flag in the next version
+        >>> pir_flag = 'FLAGS_enable_new_ir_in_executor'
+        >>> os.environ[pir_flag] = 'True'
+        >>> import paddle
+        >>> from paddle import pir
+        >>> paddle.enable_static()
+        >>> x = paddle.randn([4, 4])
+        >>> main_program, start_program = (
+        ...     paddle.static.Program(),
+        ...     paddle.static.Program(),
+        ... )
+        >>> with paddle.static.program_guard(main_program, start_program):
+        ...     x_s = paddle.static.data('x', [4, 4], x.dtype)
+        ...     x_s.stop_gradient = False
+        ...     y_s = paddle.matmul(x_s, x_s)
+        ...     z_s = paddle.add(y_s, y_s)
+        ...     k_s = paddle.tanh(z_s)
+        >>> newir_program, mappings = pir.translate_to_new_ir_with_param_map(main_program.desc)
+        >>> print(newir_program)
+        {
+            (%0) = "pd_op.data" () {dtype:(pd_op.DataType)float32,is_persisable:[false],name:"x",place:(pd_op.Place)Place(undefined:0),shape:(pd_op.IntArray)[4,4],stop_gradient:[false]} : () -> pd_op.tensor<4x4xf32>
+            (%1) = "pd_op.matmul" (%0, %0) {is_persisable:[false],stop_gradient:[false],transpose_x:false,transpose_y:false} : (pd_op.tensor<4x4xf32>, pd_op.tensor<4x4xf32>) -> pd_op.tensor<4x4xf32>
+            (%2) = "pd_op.add" (%1, %1) {is_persisable:[false],stop_gradient:[false]} : (pd_op.tensor<4x4xf32>, pd_op.tensor<4x4xf32>) -> pd_op.tensor<4x4xf32>
+            (%3) = "pd_op.tanh" (%2) {is_persisable:[false],stop_gradient:[false]} : (pd_op.tensor<4x4xf32>) -> pd_op.tensor<4x4xf32>
+        }
+        >>> print(mappings)
+        {'matmul_v2_0.tmp_0': [Value(define_op_name=pd_op.matmul, index=0, dtype=pd_op.tensor<4x4xf32>)], 'x': [Value(define_op_name=pd_op.data, index=0, dtype=pd_op.tensor<4x4xf32>)], 'tanh_0.tmp_0': [Value(define_op_name=pd_op.tanh, index=0, dtype=pd_op.tensor<4x4xf32>)], 'elementwise_add_0': [Value(define_op_name=pd_op.add, index=0, dtype=pd_op.tensor<4x4xf32>)]}
 )DOC");
 }
