This is the mail archive of the gdb-patches@sourceware.org mailing list for the GDB project.

eliminate target_async_mask


Since target_wait takes a TARGET_WNOHANG option, we don't need the
target_async_mask hack anymore.  Instead, callers that really want a
synchronous wait (currently only infcalls) can call wait_for_inferior
directly.
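
In caller terms, the change boils down to the following (condensed
from the infcall.c hunk below; the TRY_CATCH wrapping and thread
bookkeeping are omitted):

  /* Before: mask async around the call, so the target looks
     synchronous.  */
  saved_async = target_async_mask (0);
  proceed (real_pc, TARGET_SIGNAL_0, 0);
  target_async_mask (saved_async);

  /* After: leave the target's async nature alone, and do the blocking
     wait ourselves, just like `proceed' does when the target is
     synchronous.  */
  proceed (real_pc, TARGET_SIGNAL_0, 0);
  if (target_can_async_p () && is_running (inferior_ptid))
    {
      wait_for_inferior ();
      normal_stop ();
    }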

This drops a bunch of cruft from the target backends.

One change to fetch_inferior_event was necessary, as revealed by tests
that do:

(gdb) b foo
(gdb) c
(gdb) up
(gdb) call function()
(gdb) frame

This should print frame #1, but without the fetch_inferior_event
change it wouldn't, because delayed events can show up in
fetch_inferior_event.  For example, a SIGCHLD arrives while we're in a
blocking target_wait call; by the time the event loop gets a chance to
process it, the corresponding stop has already been reported to the
core synchronously, so the target returns TARGET_WAITKIND_IGNORE (in
other words, waitpid(-1, WNOHANG) returns 0).  We'd still be flushing
the register and frame caches for such a spurious event, thus losing
the selected frame.  These delayed events happen frequently in
non-stop mode, but we never saw a problem before, because in non-stop
mode fetch_inferior_event makes sure the selected thread and its frame
are always restored after handling an event.
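
For reference, the waitpid-to-IGNORE mapping mentioned above looks
roughly like this (a simplified, hypothetical sketch, not code from
this patch; the real logic lives in the native target's wait method):

  /* Hypothetical sketch of a native target's to_wait method, showing
     only how a "no event pending" poll turns into
     TARGET_WAITKIND_IGNORE.  */

  static ptid_t
  example_wait (ptid_t ptid, struct target_waitstatus *ourstatus,
                int options)
  {
    int status;
    pid_t pid;

    /* With TARGET_WNOHANG we poll instead of blocking.  */
    pid = waitpid (-1, &status,
                   (options & TARGET_WNOHANG) ? WNOHANG : 0);

    if (pid == 0)
      {
        /* Nothing left to report: the stop that raised the SIGCHLD
           was already consumed by an earlier, synchronous wait.  */
        ourstatus->kind = TARGET_WAITKIND_IGNORE;
        return minus_one_ptid;
      }

    /* Otherwise, translate the raw STATUS into *OURSTATUS.  */
    store_waitstatus (ourstatus, status);
    return pid_to_ptid (pid);
  }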

Tested on x86_64-linux, native and gdbserver, sync and async.

Checked in.

-- 
Pedro Alves

2011-06-06  Pedro Alves  <pedro@codesourcery.com>

	gdb/
	* infcall.c (run_inferior_call): Don't mask async.  Instead force
	a synchronous wait, if the target can async.

	* target.h (struct target_ops): Delete to_async_mask.
	(target_async_mask): Delete.
	* target.c (update_current_target): Delete references to to_async_mask.
	* linux-nat.c (linux_nat_async_mask_value): Delete.
	(linux_nat_is_async_p, linux_nat_can_async_p): Remove references
	to linux_nat_async_mask_value.
	(linux_nat_async_mask): Delete.
	(linux_nat_async, linux_nat_close): Remove references to
	linux_nat_async_mask_value.
	* record.c (record_async_mask_value): Delete.
	(record_async): Remove references to record_async_mask_value.
	(record_async_mask): Delete.
	(record_can_async_p, record_is_async_p): Remove references to
	record_async_mask_value.
	(init_record_ops, init_record_core_ops): Remove references to
	record_async_mask.
	* remote.c (remote_async_mask_value): Delete.
	(init_remote_ops): Remove reference to remote_async_mask.
	(remote_can_async_p, remote_is_async_p): Remove references to
	remote_async_mask_value.
	(remote_async): Remove references to remote_async_mask_value.
	(remote_async_mask): Delete.

	* infrun.c (fetch_inferior_event): Don't claim registers changed
	if the current thread is already not executing.

---
 gdb/infcall.c   |   26 +++++++++++-------------
 gdb/infrun.c    |   11 +++++++++-
 gdb/linux-nat.c |   60 +++-----------------------------------------------------
 gdb/record.c    |   27 +------------------------
 gdb/remote.c    |   22 +-------------------
 gdb/target.c    |    4 ---
 gdb/target.h    |   18 ----------------
 7 files changed, 30 insertions(+), 138 deletions(-)

Index: src/gdb/infcall.c
===================================================================
--- src.orig/gdb/infcall.c	2011-06-06 13:19:14.068860570 +0100
+++ src/gdb/infcall.c	2011-06-06 13:41:31.148861014 +0100
@@ -387,10 +387,8 @@ static struct gdb_exception
 run_inferior_call (struct thread_info *call_thread, CORE_ADDR real_pc)
 {
   volatile struct gdb_exception e;
-  int saved_async = 0;
   int saved_in_infcall = call_thread->control.in_infcall;
   ptid_t call_thread_ptid = call_thread->ptid;
-  char *saved_target_shortname = xstrdup (target_shortname);
 
   call_thread->control.in_infcall = 1;
 
@@ -401,22 +399,24 @@ run_inferior_call (struct thread_info *c
   /* We want stop_registers, please...  */
   call_thread->control.proceed_to_finish = 1;
 
-  if (target_can_async_p ())
-    saved_async = target_async_mask (0);
-
   TRY_CATCH (e, RETURN_MASK_ALL)
-    proceed (real_pc, TARGET_SIGNAL_0, 0);
+    {
+      proceed (real_pc, TARGET_SIGNAL_0, 0);
+
+      /* Inferior function calls are always synchronous, even if the
+	 target supports asynchronous execution.  Do here what
+	 `proceed' itself does in sync mode.  */
+      if (target_can_async_p () && is_running (inferior_ptid))
+	{
+	  wait_for_inferior ();
+	  normal_stop ();
+	}
+    }
 
   /* At this point the current thread may have changed.  Refresh
      CALL_THREAD as it could be invalid if its thread has exited.  */
   call_thread = find_thread_ptid (call_thread_ptid);
 
-  /* Don't restore the async mask if the target has changed,
-     saved_async is for the original target.  */
-  if (saved_async
-      && strcmp (saved_target_shortname, target_shortname) == 0)
-    target_async_mask (saved_async);
-
   enable_watchpoints_after_interactive_call_stop ();
 
   /* Call breakpoint_auto_delete on the current contents of the bpstat
@@ -433,8 +433,6 @@ run_inferior_call (struct thread_info *c
   if (call_thread != NULL)
     call_thread->control.in_infcall = saved_in_infcall;
 
-  xfree (saved_target_shortname);
-
   return e;
 }
 
Index: src/gdb/target.h
===================================================================
--- src.orig/gdb/target.h	2011-06-06 13:19:14.068860570 +0100
+++ src/gdb/target.h	2011-06-06 13:41:31.148861014 +0100
@@ -524,7 +524,6 @@ struct target_ops
     int (*to_can_async_p) (void);
     int (*to_is_async_p) (void);
     void (*to_async) (void (*) (enum inferior_event_type, void *), void *);
-    int (*to_async_mask) (int);
     int (*to_supports_non_stop) (void);
     /* find_memory_regions support method for gcore */
     int (*to_find_memory_regions) (find_memory_region_ftype func, void *data);
@@ -1255,23 +1254,6 @@ int target_supports_non_stop (void);
 #define target_async(CALLBACK,CONTEXT) \
      (current_target.to_async ((CALLBACK), (CONTEXT)))
 
-/* This is to be used ONLY within call_function_by_hand().  It provides
-   a workaround, to have inferior function calls done in sychronous
-   mode, even though the target is asynchronous.  After
-   target_async_mask(0) is called, calls to target_can_async_p() will
-   return FALSE , so that target_resume() will not try to start the
-   target asynchronously.  After the inferior stops, we IMMEDIATELY
-   restore the previous nature of the target, by calling
-   target_async_mask(1).  After that, target_can_async_p() will return
-   TRUE.  ANY OTHER USE OF THIS FEATURE IS DEPRECATED.
-
-   FIXME ezannoni 1999-12-13: we won't need this once we move
-   the turning async on and off to the single execution commands,
-   from where it is done currently, in remote_resume().  */
-
-#define target_async_mask(MASK)	\
-  (current_target.to_async_mask (MASK))
-
 #define target_execution_direction() \
   (current_target.to_execution_direction ())
 
Index: src/gdb/target.c
===================================================================
--- src.orig/gdb/target.c	2011-06-06 13:19:14.068860570 +0100
+++ src/gdb/target.c	2011-06-06 13:41:31.228861014 +0100
@@ -659,7 +659,6 @@ update_current_target (void)
       INHERIT (to_can_async_p, t);
       INHERIT (to_is_async_p, t);
       INHERIT (to_async, t);
-      INHERIT (to_async_mask, t);
       INHERIT (to_find_memory_regions, t);
       INHERIT (to_make_corefile_notes, t);
       INHERIT (to_get_bookmark, t);
@@ -829,9 +828,6 @@ update_current_target (void)
   de_fault (to_async,
 	    (void (*) (void (*) (enum inferior_event_type, void*), void*))
 	    tcomplain);
-  de_fault (to_async_mask,
-	    (int (*) (int))
-	    return_one);
   de_fault (to_thread_architecture,
 	    default_thread_architecture);
   current_target.to_read_description = NULL;
Index: src/gdb/linux-nat.c
===================================================================
--- src.orig/gdb/linux-nat.c	2011-06-06 13:19:14.068860570 +0100
+++ src/gdb/linux-nat.c	2011-06-06 13:41:31.278861014 +0100
@@ -250,15 +250,11 @@ static int linux_supports_tracesysgood_f
 
 static int linux_supports_tracevforkdone_flag = -1;
 
-/* Async mode support.  */
-
-/* Zero if the async mode, although enabled, is masked, which means
-   linux_nat_wait should behave as if async mode was off.  */
-static int linux_nat_async_mask_value = 1;
-
 /* Stores the current used ptrace() options.  */
 static int current_ptrace_options = 0;
 
+/* Async mode support.  */
+
 /* The read/write ends of the pipe registered as waitable file in the
    event loop.  */
 static int linux_nat_event_pipe[2] = { -1, -1 };
@@ -306,7 +302,6 @@ static void linux_nat_async (void (*call
 			     (enum inferior_event_type event_type,
 			      void *context),
 			     void *context);
-static int linux_nat_async_mask (int mask);
 static int kill_lwp (int lwpid, int signo);
 
 static int stop_callback (struct lwp_info *lp, void *data);
@@ -5359,11 +5354,7 @@ linux_nat_is_async_p (void)
   /* NOTE: palves 2008-03-21: We're only async when the user requests
      it explicitly with the "set target-async" command.
      Someday, linux will always be async.  */
-  if (!target_async_permitted)
-    return 0;
-
-  /* See target.h/target_async_mask.  */
-  return linux_nat_async_mask_value;
+  return target_async_permitted;
 }
 
 /* target_can_async_p implementation.  */
@@ -5374,11 +5365,7 @@ linux_nat_can_async_p (void)
   /* NOTE: palves 2008-03-21: We're only async when the user requests
      it explicitly with the "set target-async" command.
      Someday, linux will always be async.  */
-  if (!target_async_permitted)
-    return 0;
-
-  /* See target.h/target_async_mask.  */
-  return linux_nat_async_mask_value;
+  return target_async_permitted;
 }
 
 static int
@@ -5398,37 +5385,6 @@ linux_nat_supports_multi_process (void)
   return linux_multi_process;
 }
 
-/* target_async_mask implementation.  */
-
-static int
-linux_nat_async_mask (int new_mask)
-{
-  int curr_mask = linux_nat_async_mask_value;
-
-  if (curr_mask != new_mask)
-    {
-      if (new_mask == 0)
-	{
-	  linux_nat_async (NULL, 0);
-	  linux_nat_async_mask_value = new_mask;
-	}
-      else
-	{
-	  linux_nat_async_mask_value = new_mask;
-
-	  /* If we're going out of async-mask in all-stop, then the
-	     inferior is stopped.  The next resume will call
-	     target_async.  In non-stop, the target event source
-	     should be always registered in the event loop.  Do so
-	     now.  */
-	  if (non_stop)
-	    linux_nat_async (inferior_event_handler, 0);
-	}
-    }
-
-  return curr_mask;
-}
-
 static int async_terminal_is_ours = 1;
 
 /* target_terminal_inferior implementation.  */
@@ -5555,10 +5511,6 @@ static void
 linux_nat_async (void (*callback) (enum inferior_event_type event_type,
 				   void *context), void *context)
 {
-  if (linux_nat_async_mask_value == 0 || !target_async_permitted)
-    internal_error (__FILE__, __LINE__,
-		    "Calling target_async when async is masked");
-
   if (callback != NULL)
     {
       async_client_callback = callback;
@@ -5651,9 +5603,6 @@ linux_nat_close (int quitting)
   if (target_is_async_p ())
     target_async (NULL, 0);
 
-  /* Reset the async_masking.  */
-  linux_nat_async_mask_value = 1;
-
   if (linux_ops->to_close)
     linux_ops->to_close (quitting);
 }
@@ -5800,7 +5749,6 @@ linux_nat_add_target (struct target_ops
   t->to_is_async_p = linux_nat_is_async_p;
   t->to_supports_non_stop = linux_nat_supports_non_stop;
   t->to_async = linux_nat_async;
-  t->to_async_mask = linux_nat_async_mask;
   t->to_terminal_inferior = linux_nat_terminal_inferior;
   t->to_terminal_ours = linux_nat_terminal_ours;
   t->to_close = linux_nat_close;
Index: src/gdb/record.c
===================================================================
--- src.orig/gdb/record.c	2011-06-06 13:19:14.068860570 +0100
+++ src/gdb/record.c	2011-06-06 13:41:31.278861014 +0100
@@ -1817,16 +1817,10 @@ record_goto_bookmark (gdb_byte *bookmark
   return;
 }
 
-static int record_async_mask_value = 1;
-
 static void
 record_async (void (*callback) (enum inferior_event_type event_type,
 				void *context), void *context)
 {
-  if (record_async_mask_value == 0)
-    internal_error (__FILE__, __LINE__,
-		    _("Calling record_async when async is masked"));
-
   /* If we're on top of a line target (e.g., linux-nat, remote), then
      set it to async mode as well.  Will be NULL if we're sitting on
      top of the core target, for "record restore".  */
@@ -1835,32 +1829,17 @@ record_async (void (*callback) (enum inf
 }
 
 static int
-record_async_mask (int new_mask)
-{
-  int curr_mask = record_async_mask_value;
-
-  record_async_mask_value = new_mask;
-  return curr_mask;
-}
-
-static int
 record_can_async_p (void)
 {
   /* We only enable async when the user specifically asks for it.  */
-  if (!target_async_permitted)
-    return 0;
-
-  return record_async_mask_value;
+  return target_async_permitted;
 }
 
 static int
 record_is_async_p (void)
 {
   /* We only enable async when the user specifically asks for it.  */
-  if (!target_async_permitted)
-    return 0;
-
-  return record_async_mask_value;
+  return target_async_permitted;
 }
 
 static enum exec_direction_kind
@@ -1899,7 +1878,6 @@ init_record_ops (void)
   record_ops.to_async = record_async;
   record_ops.to_can_async_p = record_can_async_p;
   record_ops.to_is_async_p = record_is_async_p;
-  record_ops.to_async_mask = record_async_mask;
   record_ops.to_execution_direction = record_execution_direction;
   record_ops.to_magic = OPS_MAGIC;
 }
@@ -2125,7 +2103,6 @@ init_record_core_ops (void)
   record_core_ops.to_async = record_async;
   record_core_ops.to_can_async_p = record_can_async_p;
   record_core_ops.to_is_async_p = record_is_async_p;
-  record_core_ops.to_async_mask = record_async_mask;
   record_core_ops.to_execution_direction = record_execution_direction;
   record_core_ops.to_magic = OPS_MAGIC;
 }
Index: src/gdb/remote.c
===================================================================
--- src.orig/gdb/remote.c	2011-06-06 13:19:14.068860570 +0100
+++ src/gdb/remote.c	2011-06-06 13:41:31.288861014 +0100
@@ -134,8 +134,6 @@ static int remote_is_async_p (void);
 static void remote_async (void (*callback) (enum inferior_event_type event_type,
 					    void *context), void *context);
 
-static int remote_async_mask (int new_mask);
-
 static void remote_detach (struct target_ops *ops, char *args, int from_tty);
 
 static void remote_interrupt (int signo);
@@ -721,8 +719,6 @@ static struct target_ops remote_ops;
 
 static struct target_ops extended_remote_ops;
 
-static int remote_async_mask_value = 1;
-
 /* FIXME: cagney/1999-09-23: Even though getpkt was called with
    ``forever'' still use the normal timeout mechanism.  This is
    currently used by the ASYNC code to guarentee that target reads
@@ -10359,7 +10355,6 @@ Specify the serial device it is connecte
   remote_ops.to_can_async_p = remote_can_async_p;
   remote_ops.to_is_async_p = remote_is_async_p;
   remote_ops.to_async = remote_async;
-  remote_ops.to_async_mask = remote_async_mask;
   remote_ops.to_terminal_inferior = remote_terminal_inferior;
   remote_ops.to_terminal_ours = remote_terminal_ours;
   remote_ops.to_supports_non_stop = remote_supports_non_stop;
@@ -10426,7 +10421,7 @@ remote_can_async_p (void)
     return 0;
 
   /* We're async whenever the serial device is.  */
-  return remote_async_mask_value && serial_can_async_p (remote_desc);
+  return serial_can_async_p (remote_desc);
 }
 
 static int
@@ -10437,7 +10432,7 @@ remote_is_async_p (void)
     return 0;
 
   /* We're async whenever the serial device is.  */
-  return remote_async_mask_value && serial_is_async_p (remote_desc);
+  return serial_is_async_p (remote_desc);
 }
 
 /* Pass the SERIAL event on and up to the client.  One day this code
@@ -10473,10 +10468,6 @@ static void
 remote_async (void (*callback) (enum inferior_event_type event_type,
 				void *context), void *context)
 {
-  if (remote_async_mask_value == 0)
-    internal_error (__FILE__, __LINE__,
-		    _("Calling remote_async when async is masked"));
-
   if (callback != NULL)
     {
       serial_async (remote_desc, remote_async_serial_handler, NULL);
@@ -10487,15 +10478,6 @@ remote_async (void (*callback) (enum inf
     serial_async (remote_desc, NULL, NULL);
 }
 
-static int
-remote_async_mask (int new_mask)
-{
-  int curr_mask = remote_async_mask_value;
-
-  remote_async_mask_value = new_mask;
-  return curr_mask;
-}
-
 static void
 set_remote_cmd (char *args, int from_tty)
 {
Index: src/gdb/infrun.c
===================================================================
--- src.orig/gdb/infrun.c	2011-06-06 13:41:36.000000000 +0100
+++ src/gdb/infrun.c	2011-06-06 13:41:50.698861020 +0100
@@ -2739,7 +2739,16 @@ fetch_inferior_event (void *client_data)
      status mechanism.  */
 
   overlay_cache_invalid = 1;
-  registers_changed ();
+
+  /* But don't do it if the current thread is already stopped (hence
+     this is either a delayed event that will result in
+     TARGET_WAITKIND_IGNORE, or it's an event for another thread (and
+     we always clear the register and frame caches when the user
+     switches threads anyway).  If we didn't do this, a spurious
+     delayed event in all-stop mode would make the user lose the
+     selected frame.  */
+  if (non_stop || is_executing (inferior_ptid))
+    registers_changed ();
 
   make_cleanup_restore_integer (&execution_direction);
   execution_direction = target_execution_direction ();

