Loop to minimize benchmark overhead in results
sloretz committed Jul 22, 2022
1 parent 3e49727 commit 60cbd2c
Showing 5 changed files with 88 additions and 64 deletions.
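
The change applies the same pattern to every file below: instead of timing a single
operation per benchmark round, each benchmarked function now loops over a fixed number
of operations (100 action goals, or 1000 service calls, messages, or callbacks). The
fixed cost of the harness invoking the function once per round is amortized over the
loop, so the reported times are dominated by rclpy work rather than benchmark overhead.
A minimal sketch of the idea with the pytest-benchmark "benchmark" fixture follows; it
is illustrative only, and noop() is a hypothetical stand-in for one rclpy round trip,
not code from this commit.

ONE_THOUSAND = 1000


def noop():
    # Hypothetical stand-in for one operation, e.g. a service call or a published message.
    pass


def test_single_operation(benchmark):
    # Before this commit: the per-round harness overhead dominates a cheap operation.
    benchmark(noop)


def test_one_thousand_operations(benchmark):
    # After this commit: each round performs many operations, so the per-round
    # overhead is spread out and the result mostly reflects the operation itself.
    def bm():
        for _ in range(ONE_THOUSAND):
            noop()

    benchmark(bm)
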
27 changes: 15 additions & 12 deletions rclpy/test/benchmark/test_benchmark_actions.py
@@ -13,15 +13,17 @@
 # limitations under the License.
 
 from action_msgs.msg import GoalStatus
-import pytest
 import rclpy
 from rclpy.action import ActionClient, ActionServer
 from rclpy.executors import SingleThreadedExecutor
-from rclpy.qos import qos_profile_services_default, QoSProfile, ReliabilityPolicy
+from rclpy.qos import qos_profile_services_default, ReliabilityPolicy
 from test_msgs.action import Fibonacci
 
 
-def test_one_goal(benchmark):
+ONE_HUNDRED = 100
+
+
+def test_one_hundred_goals(benchmark):
     context = rclpy.context.Context()
     rclpy.init(context=context)
     try:
@@ -38,7 +40,7 @@ def test_one_goal(benchmark):
 
         def execute_cb(goal_handle):
             goal_handle.succeed()
-            return response
+            return Fibonacci.Result()
 
         action_server = ActionServer(
             node,
@@ -55,15 +57,16 @@ def execute_cb(goal_handle):
         assert action_client.wait_for_server(timeout_sec=5.0)
 
         def bm():
-            fut = action_client.send_goal_async(Fibonacci.Goal())
-            executor.spin_until_future_complete(fut)
-            goal_handle = fut.result()  # Raises if there was an error
-            assert goal_handle.accepted
-            result_fut = goal_handle.get_result_async()
-            executor.spin_until_future_complete(result_fut)
-            return result_fut.result().status
+            for _ in range(ONE_HUNDRED):
+                goal_fut = action_client.send_goal_async(Fibonacci.Goal())
+                executor.spin_until_future_complete(goal_fut)
+                client_goal_handle = goal_fut.result()
+                assert client_goal_handle.accepted
+                result_fut = client_goal_handle.get_result_async()
+                executor.spin_until_future_complete(result_fut)
+                assert GoalStatus.STATUS_SUCCEEDED == result_fut.result().status
 
-        assert GoalStatus.STATUS_SUCCEEDED == benchmark(bm)
+        benchmark(bm)
 
         executor.shutdown()
         action_client.destroy()
17 changes: 10 additions & 7 deletions rclpy/test/benchmark/test_benchmark_client_service.py
@@ -12,14 +12,16 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import pytest
 import rclpy
 from rclpy.executors import SingleThreadedExecutor
-from rclpy.qos import qos_profile_services_default, QoSProfile, ReliabilityPolicy
+from rclpy.qos import qos_profile_services_default, ReliabilityPolicy
 from test_msgs.srv import Empty as EmptySrv
 
 
-def test_one_call(benchmark):
+ONE_THOUSAND = 1000
+
+
+def test_one_thousand_service_calls(benchmark):
     context = rclpy.context.Context()
     rclpy.init(context=context)
     try:
@@ -40,11 +42,12 @@ def cb(_, response):
         assert client.wait_for_service(timeout_sec=5.0)
 
         def bm():
-            fut = client.call_async(EmptySrv.Request())
-            executor.spin_until_future_complete(fut)
-            return fut.result()  # Raises if there was an error
+            for _ in range(ONE_THOUSAND):
+                fut = client.call_async(EmptySrv.Request())
+                executor.spin_until_future_complete(fut)
+                assert fut.result()  # Raises if there was an error
 
-        assert isinstance(benchmark(bm), EmptySrv.Response)
+        benchmark(bm)
 
         executor.shutdown()
         node.destroy_client(client)
40 changes: 23 additions & 17 deletions rclpy/test/benchmark/test_benchmark_guard_condition.py
@@ -12,33 +12,37 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import pytest
 import rclpy
 from rclpy.executors import SingleThreadedExecutor
 
 
-def test_one_callback(benchmark):
+ONE_THOUSAND = 1000
+
+
+def test_one_thousand_guard_callbacks(benchmark):
     context = rclpy.context.Context()
     rclpy.init(context=context)
     try:
         node = rclpy.create_node('benchmark_many_gc_calls', context=context)
-        called = False
+        num_calls = 0
 
         def cb():
-            nonlocal called
-            called = True
+            nonlocal num_calls
+            num_calls += 1
 
         gc = node.create_guard_condition(cb)
 
         executor = SingleThreadedExecutor(context=context)
         executor.add_node(node)
 
         def bm():
-            nonlocal called
-            gc.trigger()
-            while not called:
+            nonlocal num_calls
+            # Reset for each benchmark run
+            num_calls = 0
+            while num_calls < ONE_THOUSAND:
+                gc.trigger()
                 executor.spin_once(timeout_sec=0)
-            return called
+            return num_calls == ONE_THOUSAND
 
         assert benchmark(bm)
 
@@ -49,28 +53,30 @@ def bm():
         rclpy.shutdown(context=context)
 
 
-def test_one_coroutine_callback(benchmark):
+def test_one_thousand_guard_coroutine_callbacks(benchmark):
     context = rclpy.context.Context()
     rclpy.init(context=context)
     try:
         node = rclpy.create_node('benchmark_many_gc_calls', context=context)
-        called = False
+        num_calls = 0
 
         async def coro():
-            nonlocal called
-            called = True
+            nonlocal num_calls
+            num_calls += 1
 
         gc = node.create_guard_condition(coro)
 
         executor = SingleThreadedExecutor(context=context)
         executor.add_node(node)
 
         def bm():
-            nonlocal called
-            gc.trigger()
-            while not called:
+            nonlocal num_calls
+            # Reset for each benchmark run
+            num_calls = 0
+            while num_calls < ONE_THOUSAND:
+                gc.trigger()
                 executor.spin_once(timeout_sec=0)
-            return called
+            return num_calls == ONE_THOUSAND
 
         assert benchmark(bm)
 
23 changes: 15 additions & 8 deletions rclpy/test/benchmark/test_benchmark_pub_sub.py
@@ -12,25 +12,29 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import pytest
 import rclpy
 from rclpy.executors import SingleThreadedExecutor
 from rclpy.qos import QoSProfile, ReliabilityPolicy
 from test_msgs.msg import Empty as EmptyMsg
 
 
-def test_one_message(benchmark):
+ONE_THOUSAND = 1000
+
+
+def test_one_thousand_messages(benchmark):
     context = rclpy.context.Context()
     rclpy.init(context=context)
     try:
         node = rclpy.create_node('benchmark_pub_sub', context=context)
         qos = QoSProfile(depth=1, reliability=ReliabilityPolicy.RELIABLE)
         pub = node.create_publisher(EmptyMsg, 'empty', qos)
-        called = False
+        num_calls = 0
 
         def cb(_):
-            nonlocal called
-            called = True
+            nonlocal num_calls
+            num_calls += 1
+            # Send next message
+            pub.publish(EmptyMsg())
 
         sub = node.create_subscription(EmptyMsg, 'empty', cb, qos)
 
@@ -42,11 +46,14 @@ def cb(_):
             executor.spin_once(timeout_sec=0.01)
 
         def bm():
-            nonlocal called
+            nonlocal num_calls
+            # Reset for each benchmark run
+            num_calls = 0
+            # Prime the loop with a message
             pub.publish(EmptyMsg())
-            while not called:
+            while num_calls < ONE_THOUSAND:
                 executor.spin_once(timeout_sec=0)
-            return called
+            return num_calls == ONE_THOUSAND
 
         assert benchmark(bm)
 
45 changes: 25 additions & 20 deletions rclpy/test/benchmark/test_benchmark_timer.py
@@ -12,33 +12,36 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import pytest
 import rclpy
 from rclpy.executors import SingleThreadedExecutor
 
 
-def test_one_callback(benchmark):
+ONE_THOUSAND = 1000
+
+
+def test_one_thousand_timer_coroutine_callbacks(benchmark):
     context = rclpy.context.Context()
     rclpy.init(context=context)
     try:
         node = rclpy.create_node('benchmark_many_timer_calls', context=context)
-        called = False
+        num_calls = 0
 
-        def timer_cb():
-            nonlocal called
-            called = True
+        async def timer_coro():
+            nonlocal num_calls
+            num_calls += 1
 
-        timer = node.create_timer(0, timer_cb)
+        timer = node.create_timer(0, timer_coro)
 
         executor = SingleThreadedExecutor(context=context)
         executor.add_node(node)
 
-
         def bm():
-            nonlocal called
-            while not called:
+            nonlocal num_calls
+            # Reset for each benchmark run
+            num_calls = 0
+            while num_calls < ONE_THOUSAND:
                 executor.spin_once(timeout_sec=0)
-            return called
+            return num_calls == ONE_THOUSAND
 
         assert benchmark(bm)
 
@@ -49,27 +52,29 @@ def bm():
         rclpy.shutdown(context=context)
 
 
-def test_one_coroutine_callback(benchmark):
+def test_one_thousand_timer_callbacks(benchmark):
     context = rclpy.context.Context()
     rclpy.init(context=context)
     try:
         node = rclpy.create_node('benchmark_many_timer_calls', context=context)
-        called = False
+        num_calls = 0
 
-        async def timer_coro():
-            nonlocal called
-            called = True
+        def timer_cb():
+            nonlocal num_calls
+            num_calls += 1
 
-        timer = node.create_timer(0, timer_coro)
+        timer = node.create_timer(0, timer_cb)
 
         executor = SingleThreadedExecutor(context=context)
         executor.add_node(node)
 
         def bm():
-            nonlocal called
-            while not called:
+            nonlocal num_calls
+            # Reset for each benchmark run
+            num_calls = 0
+            while num_calls < ONE_THOUSAND:
                 executor.spin_once(timeout_sec=0)
-            return called
+            return num_calls == ONE_THOUSAND
 
         assert benchmark(bm)
 
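
Each benchmark round now covers many operations, so a rough per-operation figure is the
reported round time divided by the loop count (ONE_HUNDRED or ONE_THOUSAND). Assuming
the pytest-benchmark plugin is installed, these tests can presumably be run directly,
for example with: python3 -m pytest rclpy/test/benchmark --benchmark-only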
