diff --git a/route_guidance_ros/scripts/Dummy_Speech_Recognition.py b/route_guidance_ros/scripts/Dummy_Speech_Recognition.py
old mode 100644
new mode 100755
index 66923e746e07b324c31b992473d7281ae1678692..8c964403f95ab00a1c9f73094dd708d03540fe16
--- a/route_guidance_ros/scripts/Dummy_Speech_Recognition.py
+++ b/route_guidance_ros/scripts/Dummy_Speech_Recognition.py
@@ -125,8 +125,8 @@ class Speech_Recognition(Event, Command, Query):
         return status
 
     def wait_and_send_speech_recognized_event(self):
-        detected_text = "point_a"
-        delay_duration = random.randint(3, 10)
+        detected_text = input("SR >>> ")
+        delay_duration = random.randint(0, 5)
         time.sleep(delay_duration)
         self.speech_recognized(datetime.now(), [detected_text])
 
@@ -145,24 +145,12 @@ class component:
         self._state = False
 
 
-def event_dispatch(sr):
-    """event_dispatch
-    """
-    sr.speech_recognized(datetime.now().isoformat(), [""])
-    time.sleep(0.5)
-    sr.speech_recognized(datetime.now().isoformat(), [""])
-
-
 def example_sr(port):
     """example_sr
     """
     c = component()
     sr = Speech_Recognition(c)
 
-    # start the timer to dispatch events
-    t = threading.Timer(3, event_dispatch, args=(sr,))
-    t.start()
-
     # start the XML-RPC server
     server = ThreadingXMLRPCServer(("0.0.0.0", port), logRequests=False)
     server.register_instance(sr)
diff --git a/route_guidance_ros/scripts/VirtualNavigation.py b/route_guidance_ros/scripts/VirtualNavigation.py
index bf7a35a8288f459fce08c279e0a7376c9ba3b392..1b9cb0c2227180cfb60615a2d75fa5b08d14cf97 100755
--- a/route_guidance_ros/scripts/VirtualNavigation.py
+++ b/route_guidance_ros/scripts/VirtualNavigation.py
@@ -126,7 +126,7 @@ class VirtualNavigation(Event, Command, Query):
         status = self._component.send_goal_to_robot_async(goal_coord, robot_name)
         self.nav_state_table[robot_name] = status
         print(self.nav_state_table)
-        msg = xmlrpclib.dumps((goal, robot_name, status.value), 'navi_finished')
+        msg = xmlrpclib.dumps((goal, robot_name, status.value), 'nav_finished')
         self.event_queue.put(msg)
 
     def back_stucked_robots_to_init_pos(self):
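
The event name matters because consumers dispatch on the method name carried inside each queued message. A minimal sketch of that envelope, separate from the patch and using only the standard xmlrpc.client module with illustrative payload values:

    import xmlrpc.client

    # Serialize the event the same way VirtualNavigation does after the rename ...
    msg = xmlrpc.client.dumps(("point_a", "robot1", 0), 'nav_finished')

    # ... and recover both the payload and the method name on the consumer side.
    params, methodname = xmlrpc.client.loads(msg)
    assert methodname == 'nav_finished'
    print(params)  # ('point_a', 'robot1', 0)
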
diff --git a/route_guidance_ros/scripts/Virtual_Speech_Recognition.py b/route_guidance_ros/scripts/Virtual_Speech_Recognition.py
new file mode 100755
index 0000000000000000000000000000000000000000..245c21a482912af66f88afcf9d6253749b74ca44
--- /dev/null
+++ b/route_guidance_ros/scripts/Virtual_Speech_Recognition.py
@@ -0,0 +1,176 @@
+# (Dummy) Speech_Recognition.py
+#
+
+"""Speech_Recognition
+"""
+
+import sys
+import queue
+import time
+from datetime import datetime
+import threading
+
+from pyrois import RoIS_Common, RoIS_HRI
+
+if sys.version_info.major == 2:
+    import SocketServer
+    import SimpleXMLRPCServer
+
+    class ThreadingXMLRPCServer(SocketServer.ThreadingMixIn, SimpleXMLRPCServer.SimpleXMLRPCServer):
+        pass
+else:
+    import socketserver
+    import xmlrpc.server
+
+    class ThreadingXMLRPCServer(socketserver.ThreadingMixIn, xmlrpc.server.SimpleXMLRPCServer):
+        """ThreadingXMLRPCServer
+        """
+        pass
+
+
+class Command(RoIS_Common.Command):
+    """Command
+    """
+    def __init__(self, c):
+        self._component = c
+
+    def start(self):
+        status = RoIS_HRI.ReturnCode_t.OK.value
+        return status
+
+    def stop(self):
+        status = RoIS_HRI.ReturnCode_t.OK.value
+        return status
+
+    def suspend(self):
+        status = RoIS_HRI.ReturnCode_t.OK.value
+        return status
+
+    def resume(self):
+        status = RoIS_HRI.ReturnCode_t.OK.value
+        return status
+
+    def set_parameter(self, languages, grammer, rule):
+        self._component.Recognizable_Languages = recognizable_languages
+        self._component.Languages = languages
+        self._component.Grammer = grammer
+        self._component.Rule = rule
+        status = RoIS_HRI.ReturnCode_t.OK.value
+        return status
+
+
+class Query(RoIS_Common.Query):
+    """Query
+    """
+    def __init__(self, c):
+        self._component = c
+
+    def component_status(self):
+        status = RoIS_HRI.ReturnCode_t.OK.value
+        c_status = RoIS_Common.Component_Status.UNINITIALIZED.value
+        return (status, c_status)
+
+    def get_parameter(self):
+        status = RoIS_HRI.ReturnCode_t.OK.value
+        return (status, self._component.Languages, self._component.Grammer, self._component.Rule)
+
+
+class Event(RoIS_Common.Event):
+    """Event
+    """
+    def __init__(self, c):
+        self._component = c
+        self.event_queue = queue.Queue()
+
+    def poll_event(self):
+        """poll_event
+        """
+        msg = self.event_queue.get()
+        return msg
+
+    def speech_recognized(self, timestamp, recognized_text):
+        """speech_recognition
+        """
+        msg = xmlrpc.client.dumps((timestamp, recognized_text), 'speech_recognized')
+        self.event_queue.put(msg)
+
+    def speech_input_started(self, timestamp):
+        msg = xmlrpc.client.dumps((timestamp), 'speech_input_started')
+        self.event_queue.put(msg)
+
+    def speech_input_finished(self, timestamp):
+        msg = xmlrpc.client.dumps((timestamp), 'speech_input_finished')
+        self.event_queue.put(msg)
+
+import random
+
+class Speech_Recognition(Event, Command, Query):
+    """Speech_Recognition
+    """
+    def __init__(self, c, robot_names):
+        super().__init__(c)
+        self._robot_names = robot_names
+        self._target_robot = "robot1"
+        self._component = c
+        self._component.Recognizable_Languages = set("")
+        self._component.Languages = set("English")
+        self._component.Grammer = "" #???
+        self._component.Rule = "" #???
+
+    def set_target_robot(self, robot_name):
+        if not robot_name in self._robot_names:
+            raise RuntimeError("[SR-server] Invalid robot name.")
+        self._target_robot = robot_name
+        print("target robot is set as ", self._target_robot)
+
+    def set_parameter(self, languages, grammer, rule):
+        print("Speech_Recognition::set_parameter called.")
+        th = threading.Thread(
+            target=self.wait_and_send_speech_recognized_event,
+            daemon=True)
+        th.start()
+        status = RoIS_HRI.ReturnCode_t.OK.value
+        return status
+
+    def wait_and_send_speech_recognized_event(self):
+        detected_text = input("SR >>> ")
+        delay_duration = random.randint(0, 5)
+        time.sleep(delay_duration)
+        self.speech_recognized(datetime.now(), [detected_text])
+
+    def speech_recognized(self, timestamp, recognized_text):
+        """speech_recognition
+        """
+        msg = (timestamp, recognized_text, self._target_robot), 'speech_recognized'
+        msg = xmlrpc.client.dumps(*msg)
+        self.event_queue.put(msg)
+
+
+class component:
+    """component
+    """
+    def __init__(self):
+        self._state = False
+
+
+def example_sr(port, robot_name):
+    """example_sr
+    """
+    c = component()
+    sr = Speech_Recognition(c, robot_name)
+
+    # start the XML-RPC server
+    server = ThreadingXMLRPCServer(("0.0.0.0", port), logRequests=False)
+    server.register_instance(sr)
+    server.register_introspection_functions()
+    server.register_multicall_functions()
+    server.serve_forever()
+
+
+if __name__ == '__main__':
+    if len(sys.argv) < 3:
+        print("python3 ...")
+        raise RuntimeError
+
+    port, robot_names = int(sys.argv[1]), sys.argv[2:]
+    example_sr(port, robot_names)
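
A rough usage sketch for the component above, assuming it was launched with "python3 Virtual_Speech_Recognition.py 8042 robot1 robot2 robot3" (the port and robot names registered in utilities.py); the exact output depends on what is typed at the SR >>> prompt:

    import xmlrpc.client

    proxy = xmlrpc.client.ServerProxy("http://localhost:8042")
    proxy.set_target_robot("robot2")        # events are tagged with this robot name
    proxy.set_parameter("English", "", "")  # spawns the thread that waits on the SR >>> prompt

    # poll_event() blocks until wait_and_send_speech_recognized_event() queues a result.
    msg = proxy.poll_event()
    params, methodname = xmlrpc.client.loads(msg)
    print(methodname, params)               # speech_recognized (<timestamp>, ['<typed text>'], 'robot2')
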
diff --git a/route_guidance_ros/scripts/Virtual_Speech_Recognition_client.py b/route_guidance_ros/scripts/Virtual_Speech_Recognition_client.py
new file mode 100644
index 0000000000000000000000000000000000000000..4fd660898cc0edc3cee018e925346df0eac04115
--- /dev/null
+++ b/route_guidance_ros/scripts/Virtual_Speech_Recognition_client.py
@@ -0,0 +1,125 @@
+# (Dummy) Speech_Recognition_Client.py
+
+"""Speech_Recognition_Client
+"""
+
+import threading
+# import logging
+import xmlrpc.client
+
+from pyrois import RoIS_Common, RoIS_HRI
+
+
+class Command(RoIS_Common.Command):
+    """Command
+    """
+    def __init__(self, uri):
+        self._uri = uri
+        self._proxy = xmlrpc.client.ServerProxy(self._uri)
+
+    def start(self):
+        status = RoIS_HRI.ReturnCode_t(self._proxy.start())
+        return status
+
+    def stop(self):
+        status = RoIS_HRI.ReturnCode_t(self._proxy.stop())
+        return status
+
+    def suspend(self):
+        status = RoIS_HRI.ReturnCode_t(self._proxy.suspend())
+        return status
+
+    def resume(self):
+        status = RoIS_HRI.ReturnCode_t(self._proxy.resume())
+        return status
+
+    def set_parameter(self, languages, grammer, rule):
+        s = self._proxy.set_parameter(languages, grammer, rule)
+        status = RoIS_HRI.ReturnCode_t(s)
+        return status
+
+
+class Query(RoIS_Common.Query):
+    """Query
+    """
+    def __init__(self, uri):
+        self._uri = uri
+        self._proxy = xmlrpc.client.ServerProxy(self._uri)
+
+    def component_status(self):
+        (status, c_status) = self._proxy.component_status()
+        return (RoIS_HRI.ReturnCode_t(status), RoIS_Common.Component_Status(c_status))
+
+    def get_parameter(self):
+        (status, recognizable_languages, languages, grammer, rule) = self._proxy.get_parameter()
+        return (RoIS_HRI.ReturnCode_t(status), recognizable_languages, languages, grammer, rule)
+
+
+class Event(RoIS_Common.Event):
+    """Event
+    """
+    def __init__(self, uri):
+        self._uri = uri
+        self._e_proxy = xmlrpc.client.ServerProxy(self._uri)
+        self.events = []
+        # if logger is not None:
+        #     self.logger = logger
+
+    def start_th(self):
+        self.th = threading.Thread(target=self.event_loop)
+        self.th.start()
+
+    def event_loop(self):
+        """event_loop
+        """
+        while True:
+            try:
+                self.poll_event()
+            except ConnectionRefusedError:
+                break
+
+    def poll_event(self):
+        """poll_event
+        """
+        msg = self._e_proxy.poll_event()
+        (params, methodname) = xmlrpc.client.loads(msg)
+        #self.logger.debug('poll_event: '+methodname)
+        print(params,methodname)
+        if methodname == 'speech_recognized':
+            self.speech_recognized(params[0], params[1])
+
+    def speech_recognized(self, timestamp, recognized_text):
+        self.events.append((timestamp,recognized_text))
+        print("speech_recognized",timestamp, recognized_text)
+        # self.logger.debug('received completed event'
+        #                     + command_id
+        #                     + RoIS_Service.Completed_Status(status).name)
+
+    def speech_input_started(self, timestamp):
+        self.events.append((timestamp))
+        print("speech_input_started", timestamp)
+
+    def speech_input_finished(self, timestamp):
+        self.events.append((timestamp))
+        print("speech_input_started", timestamp)
+
+
+class Speech_Recognition_Client(Command, Query, Event):
+    """Speech_Recognition_Client
+    """
+    def __init__(self, uri, engine_event_queue):
+        self._uri = uri
+        self._proxy = xmlrpc.client.ServerProxy(self._uri)
+        self._e_proxy = xmlrpc.client.ServerProxy(self._uri)
+        self.events = []
+        self.engine_event_queue = engine_event_queue
+        self.start_th()
+
+    def poll_event(self):
+        """poll_event
+        """
+        msg = self._e_proxy.poll_event()
+        self.engine_event_queue.put(msg)
+
+    def set_target_robot(self, robot_name):
+        self._e_proxy.set_target_robot(robot_name)
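
A minimal sketch of how an owner of this client consumes events, assuming the component above is serving on port 8042; the engine_event_queue indirection is what lets main_engine.py decode messages on its own thread:

    import queue
    import xmlrpc.client

    from Virtual_Speech_Recognition_client import Speech_Recognition_Client

    event_queue = queue.Queue()
    sr_client = Speech_Recognition_Client("http://localhost:8042", event_queue)

    # The client's polling thread forwards raw event messages into the queue;
    # the owner decodes them whenever it is ready.
    msg = event_queue.get()
    params, methodname = xmlrpc.client.loads(msg)
    print(methodname, params)
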
diff --git a/route_guidance_ros/scripts/main_engine.py b/route_guidance_ros/scripts/main_engine.py
index eb2f3f9d55e765fa85a2899e4ee075f386808470..a14ae7dbac25426094ad6af840e9d93e43e176f2 100644
--- a/route_guidance_ros/scripts/main_engine.py
+++ b/route_guidance_ros/scripts/main_engine.py
@@ -169,7 +169,7 @@ class EventIF(RoIS_HRI.EventIF):
         return (status, results)
 
 from VirtualNavigation_client import VirtualNavigation_Client as VNavClient
-from Dummy_Speech_Recognition_client import Speech_Recognition_Client as SRecogClient
+from Virtual_Speech_Recognition_client import Speech_Recognition_Client as SRecogClient
 from sub_engine_client import IF as EngineClient
 from pyrois.RoIS_Common import Component_Status
 from pyrois.RoIS_Service import Completed_Status
@@ -197,6 +197,9 @@ class IF(SystemIF, CommandIF, QueryIF, EventIF, Service_Application_Base):
         for engine_client in self.engine_clients.values():
             engine_client.connect()
         self.command_id_result_table = {}
+        self.next_sr_robot = "robot1"
+        self.sr_robot_command_table = {}
+
         th = threading.Thread(target=self.analyze_c_status, daemon=True)
         th.start()
 
@@ -216,34 +219,50 @@ class IF(SystemIF, CommandIF, QueryIF, EventIF, Service_Application_Base):
             self.command_id_result_table[str(self.command_id)].append(results)
             status = RoIS_HRI.ReturnCode_t.OK.value
         elif component_ref == 'Speech_Recognition':
-            pass
+            target_component = self.component_clients[component_ref]
+            self.sr_robot_command_table[self.next_sr_robot] = self.command_id
+            status = target_component.set_parameter(*parameters).value
         return (status, str(self.command_id))
 
 
-    def get_command_result(self, command_id, condition):
-        status = RoIS_HRI.ReturnCode_t.OK.value
-        return status, self.command_id_result_table[command_id]
-
     def analyze_c_status(self):
         while True:
             msg = self.component_event_queue.get()
-            (target_pos, robot_name, status), methodname = xmlrpc.client.loads(msg)
-            command_id = self.goal_command_table[str(target_pos)]
-            status = Component_Status(status)
-            if status == Component_Status.READY:
-                print(colored("Command (id: {}) was succeeded by {} which reached goal {}."\
-                    .format(command_id, robot_name, target_pos), "green"))
+            params, methodname = xmlrpc.client.loads(msg)
+            print(f"Received event: methodname => {methodname}, params => {params}")
+            if methodname == 'nav_finished':
+                target_pos, robot_name, status = params
+                command_id = self.goal_command_table[str(target_pos)]
+                status = Component_Status(status)
+                if status == Component_Status.READY:
+                    print(colored("Command (id: {}) was succeeded by {} which reached goal {}."\
+                        .format(command_id, robot_name, target_pos), "green"))
+                    # sr_component = self.component_clients['Speech_Recognition']
+                    # sr_component.set_target_robot(robot_name)
+                    self.completed(command_id, Completed_Status.OK.value)
+                    del self.goal_command_table[str(target_pos)]
+                    time.sleep(3)
+                elif status == Component_Status.ERROR:
+                    print(colored("Command (id: {}) was aborted by {} which couldn't reach goal {}."\
+                        .format(command_id, robot_name, target_pos), "red"))
+                    self.completed(command_id, Completed_Status.ABORT.value)
+                else:
+                    raise RuntimeError("Unhandled kind of vnav-status.")
+            elif methodname == 'speech_recognized':
+                print("Speech_Recognition EVENT!!!")
+                timestamp, recognized_text, target_robot = params
+                command_id = self.sr_robot_command_table[target_robot]
+                self.command_id_result_table[str(command_id)] = recognized_text[0]
                 self.completed(command_id, Completed_Status.OK.value)
-                del self.goal_command_table[str(target_pos)]
-                time.sleep(3)
-            elif status == Component_Status.ERROR:
-                print(colored("Command (id: {}) was aborted by {} which couldn't reach goal {}."\
-                    .format(command_id, robot_name, target_pos), "red"))
-                self.completed(command_id, Completed_Status.ABORT.value)
             else:
-                raise RuntimeError("Unhandled kind of vnav-status.")
+                raise RuntimeError("[main_engine]Unknown method-name of event.")
             time.sleep(3)
 
+    def get_command_result(self, command_id, condition):
+        status = RoIS_HRI.ReturnCode_t.OK.value
+        results = self.command_id_result_table[command_id]
+        return (status, results)
+
 
 class IF_server:
     """IF_Server
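
The new Speech_Recognition path in analyze_c_status boils down to two dictionaries: sr_robot_command_table maps the robot a command was issued for to its command_id, and command_id_result_table stores the recognized text so get_command_result can hand it back. A self-contained sketch of that bookkeeping, separate from the patch and using hypothetical values:

    import xmlrpc.client

    sr_robot_command_table = {}
    command_id_result_table = {}

    # set_parameter('Speech_Recognition', ...) records which robot the command targets.
    sr_robot_command_table["robot1"] = 7  # hypothetical command_id

    # Later, a 'speech_recognized' event is decoded and resolved against those tables.
    msg = xmlrpc.client.dumps(("2020-01-01T00:00:00", ["point_b"], "robot1"), 'speech_recognized')
    params, methodname = xmlrpc.client.loads(msg)
    if methodname == 'speech_recognized':
        timestamp, recognized_text, target_robot = params
        command_id = sr_robot_command_table[target_robot]
        command_id_result_table[str(command_id)] = recognized_text[0]

    print(command_id_result_table)  # {'7': 'point_b'}
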
diff --git a/route_guidance_ros/scripts/service_app_lv2.py b/route_guidance_ros/scripts/service_app_lv2.py
index f5b54273711a088cddf7e37f0d967288792d89cc..8cf8f76cea8df75d966f8373c3e023c1a478ad21 100644
--- a/route_guidance_ros/scripts/service_app_lv2.py
+++ b/route_guidance_ros/scripts/service_app_lv2.py
@@ -17,6 +17,7 @@ from transitions import Machine
 class Task(enum.Enum):
     Init = 'Init'
     NaviToUser = 'NaviToUser'
+    ListenDest = 'ListenDest'
     NaviToGuide = 'NaviToGuide'
     End = 'End'
 
@@ -25,23 +26,25 @@ class Service(Service_Application_IF):
     def __init__(self, uri="http://localhost:8000", logger=None):
         super().__init__(uri)
         # buid state machine
-        states = [Task.Init, Task.NaviToUser, Task.NaviToGuide, Task.End]
+        states = [Task.Init, Task.NaviToUser, Task.ListenDest, Task.NaviToGuide, Task.End]
         transitions = [
-            {'trigger': 'StartService', 'source': Task.Init, 'dest': Task.NaviToUser, 'after': 'start_navi_to_user' },
-            {'trigger': 'NaviSucceeded', 'source': Task.NaviToUser, 'dest': Task.NaviToGuide, 'after': 'start_navi_to_guide'},
-            {'trigger': 'NaviFailed', 'source': Task.NaviToUser, 'dest': Task.NaviToUser, 'after': 'start_navi_to_user' },
-            {'trigger': 'NaviSucceeded', 'source': Task.NaviToGuide, 'dest': Task.End, 'after': 'end_service' },
+            {'trigger': 'Start', 'source': Task.Init, 'dest': Task.NaviToUser, 'after': 'start_navi_to_user' },
+            {'trigger': 'Succeeded', 'source': Task.NaviToUser, 'dest': Task.ListenDest, 'after': 'listen_destination' },
+            {'trigger': 'Succeeded', 'source': Task.ListenDest, 'dest': Task.NaviToGuide, 'after': 'start_navi_to_guide'},
+            {'trigger': 'Failed', 'source': Task.NaviToUser, 'dest': Task.NaviToUser, 'after': 'start_navi_to_user' },
+            {'trigger': 'Succeeded', 'source': Task.NaviToGuide, 'dest': Task.End, 'after': 'end_service' },
         ]
         self.target_pos = "point_c"
-        self.dest_pos = "point_a"
         self.machine = Machine(self, states=states, transitions=transitions, initial=Task.Init, send_event=True)
+        self.command_task_table = {}
 
     def completed(self, command_id, completed_status):
+        print("command_id: ", command_id, " completed.")
         completed_status = Completed_Status(completed_status)
         if completed_status == Completed_Status.OK:
-            self.NaviSucceeded(self.dest_pos)
+            self.Succeeded()
         elif completed_status == Completed_Status.ABORT:
-            self.NaviFailed(self.target_pos)
+            self.Failed(self.target_pos)
 
     def notify_error(self, error_id, error_type):
         print('received error event {}({}) '.format( \
@@ -55,12 +58,22 @@ class Service(Service_Application_IF):
         dest = event.args[0]
         print(colored("NaviToUser started.", 'cyan'))
         (return_code, command_id) = self._proxy.set_parameter('Navigation', dest, "", "")
+        self.command_task_table[command_id] = Task.NaviToUser
         stautus = RoIS_HRI.ReturnCode_t(return_code)
 
+    def listen_destination(self, event):
+        print(colored("ListenDest started.", 'cyan'))
+        (return_code, command_id) = self._proxy.set_parameter('Speech_Recognition', "", "", "")
+        self.command_task_table[command_id] = Task.ListenDest
+
     def start_navi_to_guide(self, event):
-        dest = event.args[0]
+        latest_listen_command_id = [command_id for command_id, task in self.command_task_table.items() if task == Task.ListenDest][-1]
+        status, results = self._proxy.get_command_result(latest_listen_command_id, "")
+        dest = results
+        print("dest is ", dest)
         print(colored("NaviToGuide started.", 'cyan'))
         (return_code, command_id) = self._proxy.set_parameter('Navigation', dest, "", "")
+        self.command_task_table[command_id] = Task.NaviToGuide
         stautus = RoIS_HRI.ReturnCode_t(return_code)
 
     def end_service(self, event):
@@ -69,7 +82,7 @@ class Service(Service_Application_IF):
     def run(self):
         self._proxy = xmlrpc.client.ServerProxy(self._uri)
         time.sleep(3)
-        self.StartService(self.target_pos)
+        self.Start(self.target_pos)
 
 
 if __name__ == '__main__':
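
The reworked state machine is easiest to see in isolation. A self-contained sketch with the same states, triggers and callback names but stubbed-out callbacks, using the same transitions package (Machine and send_event=True are its real API; the Demo class and prints are placeholders):

    import enum

    from transitions import Machine

    class Task(enum.Enum):
        Init = 'Init'
        NaviToUser = 'NaviToUser'
        ListenDest = 'ListenDest'
        NaviToGuide = 'NaviToGuide'
        End = 'End'

    class Demo:
        # With send_event=True every callback receives an EventData object,
        # so trigger arguments arrive as event.args (as in start_navi_to_user above).
        def start_navi_to_user(self, event):
            print("navigate to", event.args[0])

        def listen_destination(self, event):
            print("ask the user for a destination")

        def start_navi_to_guide(self, event):
            print("navigate to the recognized destination")

        def end_service(self, event):
            print("done")

    states = [Task.Init, Task.NaviToUser, Task.ListenDest, Task.NaviToGuide, Task.End]
    transition_table = [
        {'trigger': 'Start',     'source': Task.Init,        'dest': Task.NaviToUser,  'after': 'start_navi_to_user'},
        {'trigger': 'Succeeded', 'source': Task.NaviToUser,  'dest': Task.ListenDest,  'after': 'listen_destination'},
        {'trigger': 'Succeeded', 'source': Task.ListenDest,  'dest': Task.NaviToGuide, 'after': 'start_navi_to_guide'},
        {'trigger': 'Failed',    'source': Task.NaviToUser,  'dest': Task.NaviToUser,  'after': 'start_navi_to_user'},
        {'trigger': 'Succeeded', 'source': Task.NaviToGuide, 'dest': Task.End,         'after': 'end_service'},
    ]

    demo = Demo()
    Machine(demo, states=states, transitions=transition_table, initial=Task.Init, send_event=True)
    demo.Start("point_c")  # Init -> NaviToUser
    demo.Succeeded()       # NaviToUser -> ListenDest
    demo.Succeeded()       # ListenDest -> NaviToGuide
    demo.Succeeded()       # NaviToGuide -> End
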
diff --git a/route_guidance_ros/scripts/test_main_engine_with_service.py b/route_guidance_ros/scripts/test_main_engine_with_service.py
index 73e97a1cda691af9de30f4b45fc2883964334dfe..cecacd265714fe69db7c230fa2d3c857b01ccc5f 100644
--- a/route_guidance_ros/scripts/test_main_engine_with_service.py
+++ b/route_guidance_ros/scripts/test_main_engine_with_service.py
@@ -47,10 +47,18 @@ class Service(Service_Application_IF):
     def get_robot_positions(self):
         return self.robot_positions
 
+    def speech_recognition(self, robot_name):
+        self._proxy = xmlrpc.client.ServerProxy(self._uri)
+        time.sleep(3)
+        (return_code, command_id) = self._proxy.set_parameter('Speech_Recognition', "English", "", "")
+        time.sleep(15)
+
+
     def run(self):
         destinations = [
             "point_b", "point_h", [5, 0, 0], "point_e", "point_a", [0, 5, 0]
         ]
+        self.speech_recognition("robot3")
         for dest in destinations:
             self.go_robot_to(dest)
             while self.is_running:
diff --git a/route_guidance_ros/scripts/utilities.py b/route_guidance_ros/scripts/utilities.py
index 75a600dd81262e131a7d8dca641af039487b96e4..789eeeca0e6622325c3705c1e9e5cf6e2a093380 100644
--- a/route_guidance_ros/scripts/utilities.py
+++ b/route_guidance_ros/scripts/utilities.py
@@ -78,6 +78,12 @@ def setup_multi_robot():
     ]
     process.append(SubprocessWrapper(command))
     print("Virtual Navigation Component Constructed.")
+    command = [
+        "python3", "Virtual_Speech_Recognition.py",
+        "8042", "robot1", "robot2", "robot3"
+    ]
+    process.append(SubprocessWrapper(command))
+    print("Virtual Speech-Recognition Component Constructed.")
     time.sleep(5)
     process.append(MainEngineWrapper())
     print("Main Engine Constructed.")
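
For reference, the added utilities.py entry amounts to launching the new component as a child process alongside the others. A sketch using the standard subprocess module in place of the repository's SubprocessWrapper helper (whose definition is not shown here):

    import subprocess

    # Same command line that utilities.py registers: port first, then the robot names
    # the component will accept in set_target_robot().
    sr_proc = subprocess.Popen([
        "python3", "Virtual_Speech_Recognition.py",
        "8042", "robot1", "robot2", "robot3",
    ])

    # ... main engine and service start afterwards; stop the component on shutdown.
    sr_proc.terminate()
    sr_proc.wait()
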