public inbox for archer@sourceware.org
* gdbstub initial code, v5
@ 2010-08-20 17:41 Oleg Nesterov
  2010-08-23 18:55 ` Oleg Nesterov
  2010-08-24 23:36 ` Roland McGrath
  0 siblings, 2 replies; 4+ messages in thread
From: Oleg Nesterov @ 2010-08-20 17:41 UTC (permalink / raw)
  To: archer, utrace-devel

[-- Attachment #1: Type: text/plain, Size: 1460 bytes --]

On 08/19, Oleg Nesterov wrote:
>
> Next step: handle exit correctly and report W/S. I misunderstood
> what gdbserver does when the main thread exits, it is not stupid
> as I wrongly thought.

Yes, in non-stop mode gdbserver reports W/X;process:PID when the
last thread exits. This makes sense, and ugdb does the same.
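
For illustration (the pids here are made up; the formats match what
ugdb_report_stopped() in the attached code builds, checksum trailer
omitted), the notification looks something like:

	%Stop:W0;process:4f1c		last thread exited, status 0
	%Stop:X9;process:4aee		last thread killed by signal 9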

But,

==================================================================
All, please ack/nack the behavioral difference!

When the main thread exits, gdbserver still exposes it to gdb as
a running process. It is visible via "info threads", you can switch
to this thread, and $Tp or $Hx result in "OK" as if this thread were alive.
gdbserver even pretends that $vCont;x:DEAD_THREAD works, although
this thread obviously can never report anything.

I don't think this is really right. It just confuses the user, and
imho it should be considered a minor bug.

ugdb doesn't do this. If the main thread exits, it exits like any
other thread. I played with gdb, and it seems to handle this case fine.

==================================================================

Problems:

	- I forgot to implement attaching to a thread group with
	  a dead leader. Next time.

	- The exit code (Wxx) can be wrong in the mt case.

	  The problem is, ->report_death can't safely access
	  ->group_exit_code with kernel < 2.6.35. This is
	  solvable; see the rough sketch below.
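
	  Just to illustrate the idea (this is only a sketch, it is not
	  in the attached ugdb.c, and it assumes ->group_exit_code is
	  already stable when ->report_death runs on >= 2.6.35):

		/* hypothetical helper, not part of ugdb.c */
		static void ugdb_set_stop_code(struct ugdb_thread *thread,
						bool group_dead)
		{
			if (group_dead)
				/* whole-group status; assumption: safe on >= 2.6.35 */
				thread->t_stop_code = current->signal->group_exit_code;
			else
				/* what ugdb_process_exit() falls back to today */
				thread->t_stop_code = current->exit_code;
		}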

Roland, sorry, I ignored your emails today. It is not easy for me
to switch between ugdb and utrace ;)

Oleg.

[-- Attachment #2: ugdb.c --]
[-- Type: text/plain, Size: 39062 bytes --]

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/utrace.h>
#include <linux/poll.h>
#include <linux/mm.h>
#include <linux/regset.h>
#include <asm/uaccess.h>

static int o_remote_debug;
module_param_named(echo, o_remote_debug, bool, 0);

#define BUFFER_SIZE		1024
#define PACKET_SIZE		1024

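/*
 * Outgoing buffer towards gdb.  Bytes accumulate at ->cur; ->pkt marks the
 * start of the packet body being built (just past the '$' or '%') so that
 * pb_end() can checksum it and pb_cancel() can drop it.
 */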
struct pbuf {
	char	*cur, *pkt;
	char	buf[BUFFER_SIZE];
};

static inline void pb_init(struct pbuf *pb)
{
	pb->cur = pb->buf;
	pb->pkt = NULL;
}

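/*
 * Non-stop notification state: U_STOP_IDLE - nothing stopped; U_STOP_PENDING -
 * stopped threads are queued on ->u_stopped but no %Stop notification has been
 * sent yet; U_STOP_SENT - a notification went out and the remaining stops are
 * drained via vStopped.
 */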
enum {
	U_STOP_IDLE = 0,
	U_STOP_PENDING,
	U_STOP_SENT,
};

struct ugdb {
	struct list_head	u_processes;
	struct list_head	u_stopped;

	int			u_stop_state;

	struct mutex		u_mutex;
	spinlock_t		u_slock;

	struct ugdb_thread
				*u_cur_tinfo,
				*u_cur_hg,
				*u_cur_hc;

	wait_queue_head_t	u_wait;

	int			u_err;

	struct pbuf		u_pbuf;
	char			u_cbuf[PACKET_SIZE];
	int			u_clen;

	unsigned int
				u_no_ack:1,
				u_allstop:1;
};

static inline void ugdb_ck_stopped(struct ugdb *ugdb)
{
	// XXX: temporary racy check
	WARN_ON(!list_empty(&ugdb->u_stopped) &&
				ugdb->u_stop_state == U_STOP_IDLE);
	WARN_ON(list_empty(&ugdb->u_stopped) &&
				ugdb->u_stop_state == U_STOP_PENDING);
}

static struct ugdb *ugdb_create(void)
{
	struct ugdb *ugdb;
	int err;

	err = -ENODEV;
	// XXX: ugly. proc_reg_open() should take care.
	if (!try_module_get(THIS_MODULE))
		goto out;

	err = -ENOMEM;
	ugdb = kzalloc(sizeof(*ugdb), GFP_KERNEL);
	if (!ugdb)
		goto put_module;

	INIT_LIST_HEAD(&ugdb->u_processes);
	INIT_LIST_HEAD(&ugdb->u_stopped);

	mutex_init(&ugdb->u_mutex);
	spin_lock_init(&ugdb->u_slock);

	init_waitqueue_head(&ugdb->u_wait);

	pb_init(&ugdb->u_pbuf);

	return ugdb;

put_module:
	module_put(THIS_MODULE);
out:
	return ERR_PTR(err);
}

#define P_DETACHING	(1 << 1)
#define P_ZOMBIE	(1 << 2)

struct ugdb_process {
	int			p_pid;
	int			p_state;

	struct list_head	p_threads;

	struct ugdb		*p_ugdb;
	struct list_head	p_processes;
};

static inline bool process_alive(struct ugdb_process *process)
{
	return !(process->p_state & P_ZOMBIE);
}

static inline void mark_process_dead(struct ugdb_process *process)
{
	process->p_state |= P_ZOMBIE;
}

static struct ugdb_process *ugdb_create_process(struct ugdb *ugdb, int pid)
{
	struct ugdb_process *process;

	process = kzalloc(sizeof(*process), GFP_KERNEL);
	if (!process)
		return NULL;

	process->p_pid = pid;
	process->p_ugdb = ugdb;
	INIT_LIST_HEAD(&process->p_threads);
	list_add_tail(&process->p_processes, &ugdb->u_processes);

	return process;
}

#define T_STOP_RUN	0
#define T_STOP_REQ	(1 << 0)	/* requested by gdb */
#define T_STOP_ALL	(1 << 1)	/* vCont;c:pX.-1, for report_clone */
#define T_STOP_ACK	(1 << 2)	/* visible to vStopped */
#define T_STOP_STOPPED	(1 << 3)	/* reported as stopped to gdb */
					/* TASK_TRACED + deactivated ? */
struct ugdb_thread {
	int			t_tid;
	int			t_stop_state;
	int			t_stop_code;

	struct ugdb		*t_ugdb;
	struct ugdb_process	*t_process;

	struct list_head	t_threads;
	struct list_head	t_stopped;

	struct pid		*t_spid;

	struct utrace_engine	*t_engine;
};

static inline bool thread_alive(struct ugdb_thread *thread)
{
	WARN_ON((thread->t_tid != 0) != process_alive(thread->t_process));

	return thread->t_tid != 0;
}

static inline void mark_thread_dead(struct ugdb_thread *thread)
{
	mark_process_dead(thread->t_process);
	thread->t_tid = 0;
}

static inline struct task_struct *thread_to_task(struct ugdb_thread *thread)
{
	struct task_struct *task;

	BUG_ON(!thread_alive(thread));
	task = pid_task(thread->t_spid, PIDTYPE_PID);
	BUG_ON(!task);

	return task;
}

static struct ugdb_thread *ugdb_create_thread(struct ugdb_process *process,
						struct pid *spid)
{
	struct ugdb_thread *thread;

	thread = kzalloc(sizeof(*thread), GFP_KERNEL);
	if (!thread)
		return NULL;

	thread->t_tid = pid_vnr(spid);
	thread->t_spid = get_pid(spid);
	thread->t_process = process;
	thread->t_ugdb = process->p_ugdb;
	INIT_LIST_HEAD(&thread->t_stopped);
	list_add_tail(&thread->t_threads, &process->p_threads);

	return thread;
}

static inline void ugdb_del_stopped(struct ugdb *ugdb,
					struct ugdb_thread *thread)
{
	if (list_empty(&thread->t_stopped))
		return;

	WARN_ON(!(thread->t_stop_state & T_STOP_ACK));

	spin_lock(&ugdb->u_slock);
	list_del_init(&thread->t_stopped);

	if (!(thread->t_stop_state & T_STOP_STOPPED)) {
		if (ugdb->u_stop_state == U_STOP_PENDING &&
				list_empty(&ugdb->u_stopped))
			ugdb->u_stop_state = U_STOP_IDLE;
	}
	spin_unlock(&ugdb->u_slock);
}

static void ugdb_destroy_thread(struct ugdb_thread *thread)
{
	struct ugdb *ugdb = thread->t_ugdb;

	ugdb_ck_stopped(ugdb);

	ugdb_del_stopped(ugdb, thread);

	/* NULL if attach fails */
	if (thread->t_engine)
		utrace_engine_put(thread->t_engine);

	list_del(&thread->t_threads);
	put_pid(thread->t_spid);
	kfree(thread);
}

static int ugdb_set_events(struct ugdb_thread *thread,
				unsigned long events)
{
	WARN_ON(!thread_alive(thread));

	events |= (UTRACE_EVENT(CLONE) | UTRACE_EVENT(DEATH));

	return utrace_set_events_pid(thread->t_spid, thread->t_engine,
					events);
}

static int ugdb_control(struct ugdb_thread *thread,
				enum utrace_resume_action action)
{
	// XXX: temporary racy check
	WARN_ON(!thread_alive(thread) && action != UTRACE_DETACH);

	return utrace_control_pid(thread->t_spid, thread->t_engine,
					action);
}

static void ugdb_detach_thread(struct ugdb_thread *thread, bool running)
{
	int ret;

	ret = ugdb_control(thread, UTRACE_DETACH);

	/* engine->flags == 0, it can't run a callback */
	if (!running)
		return;
	/*
	 * Ensure a callback can't race with utrace_destroy_thread().
	 * If we race with ugdb_report_clone() or ugdb_report_death(),
	 * they must see P_DETACHING under ->u_mutex.
	 */
	if (ret == -EINPROGRESS)
		utrace_barrier_pid(thread->t_spid, thread->t_engine);
}

static const struct utrace_engine_ops ugdb_utrace_ops;

/*
 * returns NULL if raced with exit(), or ERR_PTR().
 */
static struct ugdb_thread *ugdb_attach_thread(struct ugdb_process *process,
						struct pid *spid)
{
	struct ugdb_thread *thread;
	struct utrace_engine *engine;
	struct task_struct *task;

	thread = ugdb_create_thread(process, spid);
	if (!thread)
		goto err;

	engine = utrace_attach_pid(thread->t_spid, UTRACE_ATTACH_CREATE,
					&ugdb_utrace_ops, thread);
	if (IS_ERR(engine))
		goto free_thread;

	thread->t_engine = engine;

	if (ugdb_set_events(thread, 0))
		goto detach_thread;

	return thread;

detach_thread:
	ugdb_detach_thread(thread, false);
free_thread:
	ugdb_destroy_thread(thread);
err:
	rcu_read_lock();
	task = pid_task(spid, PIDTYPE_PID);
	if (task && task->exit_state)
		task = NULL;
	rcu_read_unlock();

	return task ? ERR_PTR(-ENOMEM) : NULL;
}

static inline bool is_subthread(struct ugdb_process *process,
				struct ugdb_thread *thread)
{
	return thread && thread->t_process == process;
}

static inline void ugdb_reset_tinfo(struct ugdb *ugdb)
{
	ugdb->u_cur_tinfo = NULL;
}

static void ugdb_destroy_process(struct ugdb_process *process)
{
	struct ugdb *ugdb = process->p_ugdb;
	struct ugdb_thread *thread;

	mutex_lock(&ugdb->u_mutex);
	process->p_state |= P_DETACHING;
	list_del(&process->p_processes);

	if (is_subthread(process, ugdb->u_cur_hg))
		ugdb->u_cur_hg = NULL;
	if (is_subthread(process, ugdb->u_cur_hc))
		ugdb->u_cur_hc = NULL;

	/* I hope gdb won't do detach from under qfThreadInfo */
	if (ugdb->u_cur_tinfo) {
		printk(KERN_WARNING "ugdb: detach from under qfThreadInfo\n");
		ugdb_reset_tinfo(ugdb);
	}
	mutex_unlock(&ugdb->u_mutex);

	while (!list_empty(&process->p_threads)) {
		thread = list_first_entry(&process->p_threads,
				struct ugdb_thread, t_threads);
		ugdb_detach_thread(thread, true);
		ugdb_destroy_thread(thread);
	}

	BUG_ON(!list_empty(&process->p_threads));

	kfree(process);
}

static void ugdb_destroy(struct ugdb *ugdb)
{
	struct ugdb_process *process;

	while (!list_empty(&ugdb->u_processes)) {
		process = list_first_entry(&ugdb->u_processes,
				struct ugdb_process, p_processes);
		ugdb_destroy_process(process);
	}

	BUG_ON(!list_empty(&ugdb->u_processes));
	BUG_ON(!list_empty(&ugdb->u_stopped));

	module_put(THIS_MODULE);
	kfree(ugdb);
}

static struct ugdb_thread *ugdb_attach_main(struct ugdb *ugdb,
						struct ugdb_process *process)
{
	struct ugdb_thread *thread;
	struct pid *spid;

	spid = find_get_pid(process->p_pid);
	if (!spid)
		return NULL;

	thread = ugdb_attach_thread(process, spid);
	if (IS_ERR(thread))
		thread = NULL;

	put_pid(spid);

	return thread;
}

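/*
 * Under ->siglock, walk the thread list starting after @curr and return a
 * reference to the pid of the next live thread, or NULL once we wrap around
 * back to @main.
 */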
static struct pid *get_next_pid(struct pid *main, struct pid *curr)
{
	struct task_struct *task;
	struct pid *next = NULL;

	task = pid_task(curr, PIDTYPE_PID);
	BUG_ON(!task);

	spin_lock_irq(&task->sighand->siglock);
	for (;;) {
		task = next_thread(task);

		// XXX: BUG: if main is not group leader we can race with exec
		if (task_pid(task) == main)
			break;

		if (!task->exit_state) {
			next = get_pid(task_pid(task));
			break;
		}
	}
	spin_unlock_irq(&task->sighand->siglock);

	return next;
}

static int ugdb_attach(struct ugdb *ugdb, int pid)
{
	struct ugdb_process *process;
	struct ugdb_thread *thread;
	struct pid *main_pid, *curr_pid;

	// XXX: check if exists
	process = ugdb_create_process(ugdb, pid);
	if (!process)
		goto err;

	mutex_lock(&ugdb->u_mutex);

	// XXX: check if group leader ?
	thread = ugdb_attach_main(ugdb, process);
	if (!thread)
		goto abort;

	main_pid = thread->t_spid;
	curr_pid = main_pid;
	for (;;) {
		struct pid *next_pid;

		next_pid = get_next_pid(main_pid, curr_pid);
		if (!next_pid)
			break;

		thread = ugdb_attach_thread(process, next_pid);
		put_pid(next_pid);

		if (IS_ERR(thread))
			goto abort;

		if (!thread)
			continue;

		curr_pid = next_pid;
	}

	// XXX mark it just attached

	mutex_unlock(&ugdb->u_mutex);
	return 0;

abort:
	mutex_unlock(&ugdb->u_mutex);
	ugdb_destroy_process(process);
err:
	return -1;
}

static struct ugdb_process *ugdb_find_process(struct ugdb *ugdb, int pid)
{
	struct ugdb_process *process;

	list_for_each_entry(process, &ugdb->u_processes, p_processes) {
		if (process->p_pid == pid)
			return process;
	}

	return NULL;
}

static struct ugdb_thread *ugdb_find_thread(struct ugdb *ugdb, int pid, int tid)
{
	struct ugdb_process *process;
	struct ugdb_thread *thread;

	list_for_each_entry(process, &ugdb->u_processes, p_processes) {
		if (unlikely(!process_alive(process)))
			continue;
		if (pid && process->p_pid != pid)
			continue;

		list_for_each_entry(thread, &process->p_threads, t_threads) {
			if (WARN_ON(!thread_alive(thread)))
				continue;
			if (!tid || thread->t_tid == tid)
				return thread;
		}

		if (pid)
			break;
	}

	return NULL;
}

static int ugdb_detach(struct ugdb *ugdb, int pid)
{
	struct ugdb_process *process = ugdb_find_process(ugdb, pid);

	if (!process)
		return -1;

	ugdb_destroy_process(process);
	return 0;
}

#define CUR_TINFO_END	((struct ugdb_thread *)1)

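/*
 * qfThreadInfo/qsThreadInfo iterator, called under ->u_mutex: return the
 * thread to report now and remember the next one in ->u_cur_tinfo
 * (CUR_TINFO_END once the last live thread has been handed out).
 */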
static struct ugdb_thread *ugdb_advance_tinfo(struct ugdb *ugdb)
{
	struct ugdb_thread *cur, *nxt;
	struct ugdb_process *process;

	cur = ugdb->u_cur_tinfo;

	if (cur == CUR_TINFO_END) {
		ugdb->u_cur_tinfo = NULL;
		return NULL;
	}

	if (!cur) {
		list_for_each_entry(process, &ugdb->u_processes, p_processes) {
			if (unlikely(!process_alive(process)))
				continue;

			if (!list_empty(&process->p_threads)) {
				cur = list_first_entry(&process->p_threads,
						struct ugdb_thread, t_threads);
				break;
			}
		}

		if (!cur)
			return NULL;
	}

	process = cur->t_process;

	if (list_is_last(&cur->t_threads, &process->p_threads)) {
		nxt = CUR_TINFO_END;

		list_for_each_entry_continue(process, &ugdb->u_processes, p_processes) {
			if (unlikely(!process_alive(process)))
				continue;

			if (!list_empty(&process->p_threads)) {
				nxt = list_first_entry(&process->p_threads,
						struct ugdb_thread, t_threads);
				break;
			}
		}
	} else {
		nxt = list_first_entry(&cur->t_threads,
				struct ugdb_thread, t_threads);
	}

	ugdb->u_cur_tinfo = nxt;
	return cur;
}

// -----------------------------------------------------------------------------
static bool ugdb_add_stopped(struct ugdb_thread *thread)
{
	struct ugdb *ugdb = thread->t_ugdb;
	bool ret = false;

	ugdb_ck_stopped(ugdb);

	spin_lock(&ugdb->u_slock);

	WARN_ON(thread->t_stop_state & T_STOP_ACK);
	if (WARN_ON(!list_empty(&thread->t_stopped)))
		goto unlock;

	/* raced with ugdb_cont_thread() */
	if (!(thread->t_stop_state & T_STOP_REQ))
		goto unlock;

	ret = true;
	thread->t_stop_state |= T_STOP_ACK;
	list_add_tail(&thread->t_stopped, &ugdb->u_stopped);

	if (ugdb->u_stop_state == U_STOP_IDLE) {
		ugdb->u_stop_state = U_STOP_PENDING;
		wake_up_all(&ugdb->u_wait);
	}
unlock:
	spin_unlock(&ugdb->u_slock);

	return ret;
}

static void ugdb_process_exit(struct ugdb_thread *thread)
{
	struct ugdb *ugdb = thread->t_ugdb;

	BUG_ON(!thread_alive(thread));

	ugdb_del_stopped(ugdb, thread);
	mark_thread_dead(thread);

	// XXX: OOPS, we can't read ->signal->group_exit_code !!!
	thread->t_stop_code = current->exit_code;

	// XXX: temporary, for ugdb_add_stopped()
	thread->t_stop_state = T_STOP_REQ;
	ugdb_add_stopped(thread);
}

static int ugdb_stop_thread(struct ugdb_thread *thread, bool all)
{
	struct ugdb *ugdb = thread->t_ugdb;
	int err;

	WARN_ON(!thread_alive(thread));

	ugdb_ck_stopped(ugdb);

	if (thread->t_stop_state != T_STOP_RUN) {
		/*
		 * (gdb) interrupt &
		 * (gbd) interrupt -a &
		 *
		 * make sure -a actually works if it races with clone.
		 */
		if (all && !(thread->t_stop_state & T_STOP_ALL)) {
			/*
			 * We hold ugdb->u_mutex, so we can't race with
			 * ugdb_report_clone(). But we need spinlock to
			 * avoid the race with ugdb_add_stopped() which
			 * can change ->t_stop_state in parallel.
			 */
			spin_lock(&ugdb->u_slock);
			thread->t_stop_state |= T_STOP_ALL;
			spin_unlock(&ugdb->u_slock);
		}

		return 0;
	}

	// XXX: currently we can do this lockless ...
	thread->t_stop_state = all ? (T_STOP_REQ | T_STOP_ALL) : T_STOP_REQ;
	thread->t_stop_code = 0;

	// XXX: we don't do UTRACE_STOP! this means we can't
	// stop a TASK_STOPPED task. Need to discuss jctl issues.
	// if we do UTRACE_STOP we should call ugdb_add_stopped().

	ugdb_set_events(thread, UTRACE_EVENT(QUIESCE));
	err = ugdb_control(thread, UTRACE_INTERRUPT);
	if (err && err != -EINPROGRESS)
		return err;
	return 1;
}

static int ugdb_cont_thread(struct ugdb_thread *thread, bool all)
{
	struct ugdb *ugdb = thread->t_ugdb;
	int ret;

	WARN_ON(!thread_alive(thread));

	ugdb_ck_stopped(ugdb);

	// XXX: gdb shouldn't explicitly cont an unreported thread
	WARN_ON(!all && !(thread->t_stop_state & T_STOP_STOPPED));

	if (thread->t_stop_state == T_STOP_RUN)
		return 0;

	spin_lock(&ugdb->u_slock);
	/*
	 * Nothing to do except clear the pending T_STOP_REQ.
	 */
	ret = 0;
	if (!(thread->t_stop_state & T_STOP_ACK))
		goto set_run;

	/*
	 * Alas. Thanks to remote protocol, we can't cont this
	 * thread. We probably already sent the notification, we
	 * can do nothing except ack that %Stop later in response
	 * to vStopped.
	 *
	 * OTOH, gdb shouldn't send 'c' if this thread was not
	 * reported as stopped. However, this means that gdb can
	 * see the new %Stop:T00 notification after vCont;c:pX.-1,
	 * it should handle this case correctly anyway. I hope.
	 *
	 * If this stop was not initiated by gdb we should not
	 * cancel it too, this event should be reported first.
	 */
	ret = -1;
	if (!(thread->t_stop_state & T_STOP_STOPPED))
		goto unlock;

	ret = 1;
	list_del_init(&thread->t_stopped);
set_run:
	thread->t_stop_state = T_STOP_RUN;
unlock:
	spin_unlock(&ugdb->u_slock);

	if (ret >= 0) {
		// XXX: OK, this all is racy, and I do not see any
		// solution except: implement UTRACE_STOP_STICKY and
		// move this code up under the lock, or add
		// utrace_engine_ops->notify_stopped().

		// 1. UTRACE_RESUME is racy, this is fixable.
		// 2. we need utrace_barrier() to close the race
		//    with the callback which is going to return
		//    UTRACE_STOP, but:
		//    	a) we can deadlock (solvable)
		//	b) in this case UTRACE_RESUME can race with
		//	   another stop initiated by tracee itself.

		ugdb_set_events(thread, 0);
		ugdb_control(thread, UTRACE_RESUME);
	}

	return ret;
}

static struct ugdb_thread *ugdb_next_stopped(struct ugdb *ugdb)
{
	struct ugdb_thread *thread = NULL;

	// XXX: temporary racy check
	WARN_ON(ugdb->u_stop_state == U_STOP_IDLE);

	spin_lock(&ugdb->u_slock);
	if (list_empty(&ugdb->u_stopped)) {
		ugdb->u_stop_state = U_STOP_IDLE;
	} else {
		ugdb->u_stop_state = U_STOP_SENT;

		thread = list_first_entry(&ugdb->u_stopped,
					struct ugdb_thread, t_stopped);

		thread->t_stop_state |= T_STOP_STOPPED;
		list_del_init(&thread->t_stopped);
	}
	spin_unlock(&ugdb->u_slock);

	return thread;
}

// -----------------------------------------------------------------------------
static bool ugdb_stop_pending(struct ugdb_thread *thread)
{
	if (!(thread->t_stop_state & T_STOP_REQ))
		return false;

	if (!(thread->t_stop_state & T_STOP_ACK))
		return ugdb_add_stopped(thread);

	return true;
}

static u32 ugdb_report_quiesce(u32 action, struct utrace_engine *engine,
					unsigned long event)
{
	struct ugdb_thread *thread = engine->data;

	WARN_ON(!process_alive(thread->t_process));

	/* ensure SIGKILL can't race with stop/cont in progress */
	if (event != UTRACE_EVENT(DEATH)) {
		if (ugdb_stop_pending(thread))
			return UTRACE_STOP;
	}

	return utrace_resume_action(action);
}

static u32 ugdb_report_clone(u32 action, struct utrace_engine *engine,
			       unsigned long clone_flags,
			       struct task_struct *task)
{
	struct ugdb_thread *thread = engine->data;
	struct ugdb_process *process = thread->t_process;
	struct ugdb *ugdb = thread->t_ugdb;
	struct ugdb_thread *new_thread;

	WARN_ON(!process_alive(process));

	if (!(clone_flags & CLONE_THREAD))
		goto out;

	mutex_lock(&ugdb->u_mutex);
	if (process->p_state & P_DETACHING)
		goto unlock;

	new_thread = ugdb_attach_thread(process, task_pid(task));
	BUG_ON(!new_thread);

	if (WARN_ON(IS_ERR(new_thread)))
		goto unlock;

	if (thread->t_stop_state & T_STOP_ALL)
		ugdb_stop_thread(new_thread, false);

unlock:
	mutex_unlock(&ugdb->u_mutex);
out:
	return utrace_resume_action(action);
}

static u32 ugdb_report_death(struct utrace_engine *engine,
				bool group_dead, int signal)
{
	struct ugdb_thread *thread = engine->data;
	struct ugdb_process *process = thread->t_process;
	struct ugdb *ugdb = thread->t_ugdb;

	WARN_ON(!process_alive(process));

	mutex_lock(&ugdb->u_mutex);
	if (process->p_state & P_DETACHING)
		goto unlock;

	if (ugdb->u_cur_hg == thread)
		ugdb->u_cur_hg = NULL;
	if (ugdb->u_cur_hc == thread)
		ugdb->u_cur_hc = NULL;

	if (ugdb->u_cur_tinfo == thread)
		ugdb_advance_tinfo(ugdb);

	if (list_is_singular(&process->p_threads))
		ugdb_process_exit(thread);
	else
		ugdb_destroy_thread(thread);

unlock:
	mutex_unlock(&ugdb->u_mutex);

	return UTRACE_DETACH;
}

static const struct utrace_engine_ops ugdb_utrace_ops = {
	.report_quiesce	= ugdb_report_quiesce,
	.report_clone	= ugdb_report_clone,
	.report_death	= ugdb_report_death,
};

// -----------------------------------------------------------------------------
static inline int pb_size(struct pbuf *pb)
{
	return pb->cur - pb->buf;
}

static inline int pb_room(struct pbuf *pb)
{
	return pb->buf + BUFFER_SIZE - pb->cur;
}

static inline void pb_putc(struct pbuf *pb, char c)
{
	if (WARN_ON(pb->cur >= pb->buf + BUFFER_SIZE-1))
		return;
	*pb->cur++ = c;
}

static void pb_memcpy(struct pbuf *pb, const void *data, int size)
{
	if (WARN_ON(size > pb_room(pb)))
		return;
	memcpy(pb->cur, data, size);
	pb->cur += size;
}

static inline void pb_puts(struct pbuf *pb, const char *s)
{
	pb_memcpy(pb, s, strlen(s));
}

static inline void pb_putb(struct pbuf *pb, unsigned char val)
{
	static char hex[] = "0123456789abcdef";
	pb_putc(pb, hex[(val & 0xf0) >> 4]);
	pb_putc(pb, hex[(val & 0x0f) >> 0]);
}

static void pb_putbs(struct pbuf *pb, const char *data, int size)
{
	while (size--)
		pb_putb(pb, *data++);
}

static inline void __pb_start(struct pbuf *pb, char pref)
{
	WARN_ON(pb->pkt);
	pb_putc(pb, pref);
	pb->pkt = pb->cur;
}

static inline void pb_start(struct pbuf *pb)
{
	return __pb_start(pb, '$');
}

static inline void pb_cancel(struct pbuf *pb)
{
	if (WARN_ON(!pb->pkt))
		return;

	pb->cur = pb->pkt - 1;
	pb->pkt = NULL;
}

static void pb_end(struct pbuf *pb)
{
	unsigned char csm = 0;
	char *pkt = pb->pkt;

	pb->pkt = NULL;
	if (WARN_ON(!pkt))
		return;

	while (pkt < pb->cur) {
		WARN_ON(*pkt == '$' || *pkt == '#' || *pkt == '%');
		csm += (unsigned char)*pkt++;
	}

	pb_putc(pb, '#');
	pb_putb(pb, csm);
}

static inline void pb_packs(struct pbuf *pb, const char *s)
{
	pb_start(pb);
	pb_puts(pb, s);
	pb_end(pb);
}

static void __attribute__ ((format(printf, 3, 4)))
__pb_format(struct pbuf *pb, bool whole_pkt, const char *fmt, ...)
{
	int room = pb_room(pb), size;
	va_list args;

	if (whole_pkt)
		pb_start(pb);

	va_start(args, fmt);
	size = vsnprintf(pb->cur, room, fmt, args);
	va_end(args);

	if (WARN_ON(size > room))
		return;

	pb->cur += size;

	if (whole_pkt)
		pb_end(pb);
}

#define pb_printf(pb, args...)	__pb_format((pb), false, args)
#define pb_packf(pb, args...)	__pb_format((pb), true,  args)

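/*
 * Scratch-space helpers: pb_alloc_bs() reserves room for @size raw bytes
 * which pb_putbs() will later hex-encode into the packet (two chars per
 * byte plus framing); pb_alloc_tmp() hands out temporary space for a
 * register dump without a separate allocation.
 */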
static inline void *pb_alloc_bs(struct pbuf *pb, int size)
{
	if (unlikely(pb_room(pb) < 2 * size + 4))
		return NULL;
	return pb->cur + size + 1;
}

static inline void *pb_alloc_tmp(struct pbuf *pb, int size)
{
	if (unlikely(pb_room(pb) < size))
		return NULL;
	return pb->cur + BUFFER_SIZE - size;
}

static inline void pb_flush(struct pbuf *pb, int size)
{
	int keep = pb_size(pb) - size;
	if (keep)
		memmove(pb->buf, pb->buf + size, keep);
	pb->cur -= size;
}

static int pb_copy_to_user(struct pbuf *pb, char __user *ubuf, int size)
{
	int copy = min(size, pb_size(pb));

	if (!copy)
		return -EAGAIN;

	if (o_remote_debug)
		printk(KERN_INFO "<= %.*s\n", min(copy, 64), pb->buf);

	if (copy_to_user(ubuf, pb->buf, copy))
		return -EFAULT;

	pb_flush(pb, copy);
	return copy;
}

// -----------------------------------------------------------------------------
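/*
 * Take the next stopped thread off the queue and build its stop reply:
 * a T00 packet naming thread:pPID.TID for a live thread, or a W/X exit
 * status with process:PID once the whole process has gone.  With
 * async == true it is wrapped in a %Stop notification.
 */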
static int ugdb_report_stopped(struct ugdb *ugdb, bool async)
{
	struct ugdb_thread *thread;
	int pid, tid, live, code;
	struct pbuf *pb;

	mutex_lock(&ugdb->u_mutex);
	thread = ugdb_next_stopped(ugdb);
	if (!thread)
		goto unlock;

	live = thread_alive(thread);
	code = thread->t_stop_code;

	pid = thread->t_process->p_pid;
	tid = thread->t_tid;
unlock:
	mutex_unlock(&ugdb->u_mutex);

	if (!thread)
		return false;

	pb = &ugdb->u_pbuf;

	// XXX: damn, cleanup me...
	if (async) {
		__pb_start(pb, '%');
		pb_puts(pb, "Stop:");
	} else {
		pb_start(pb);
	}

	// X9;process:4aee
	// W0;process:4f1c
	if (live) {
		pb_printf(pb, "%sthread:p%x.%x;", "T00", pid, tid);
	} else {
		char r;

		if (code & 0xff) {
			// XXX: renumber signal!
			code &= 0xff;
			r = 'X';
		} else {
			code >>= 8;
			r = 'W';
		}

		pb_printf(pb, "%c%x;process:%x", r, code, pid);
		ugdb_destroy_process(thread->t_process);
	}

	pb_end(pb);

	return true;
}

static const char *handle_vstopped(struct ugdb *ugdb)
{
	if (ugdb->u_stop_state != U_STOP_SENT)
		return "E01";

	if (ugdb_report_stopped(ugdb, false))
		return NULL;

	return "OK";
}

static const char *handle_thread_info(struct ugdb *ugdb, bool start)
{
	struct ugdb_thread *thread;
	int pid = 0, tid;

	mutex_lock(&ugdb->u_mutex);
	if (start)
		ugdb_reset_tinfo(ugdb);
	else if (!ugdb->u_cur_tinfo)
		printk(KERN_INFO "ugdb: unexpected qsThreadInfo\n");

	thread = ugdb_advance_tinfo(ugdb);
	if (thread) {
		pid = thread->t_process->p_pid;
		tid = thread->t_tid;
	}
	mutex_unlock(&ugdb->u_mutex);

	if (!pid)
		return start ? "E01" : "l";

	pb_packf(&ugdb->u_pbuf, "mp%x.%x", pid, tid);
	return NULL;
}

static char *parse_xid(char *str, int *ppid, bool multi)
{
	if (*str == '-') {
		str++;

		if (multi && *str++ == '1')
			*ppid = -1;
		else
			str = NULL;
	} else {
		char *cur = str;

		*ppid = simple_strtoul(cur, &str, 16);
		if (str == cur)
			str = NULL;
	}

	return str;
}

static char *parse_pid_tid(char *str, int *ppid, int *ptid, bool multi)
{
	if (*str++ != 'p')
		return NULL;

	str = parse_xid(str, ppid, multi);
	if (!str)
		return NULL;

	if (*str++ != '.')
		return NULL;

	str = parse_xid(str, ptid, multi);
	if (!str)
		return NULL;

	return str;
}

static const char *handle_set_cur(struct ugdb *ugdb, char *cmd)
{

	struct ugdb_thread **pthread;
	int pid, tid;

	switch (*cmd++) {
	case 'g':
		pthread = &ugdb->u_cur_hg;
		break;

	case 'c':
		pthread = &ugdb->u_cur_hc;
		break;

	default:
		goto err;
	}

	if (!parse_pid_tid(cmd, &pid, &tid, false))
		goto err;

	mutex_lock(&ugdb->u_mutex);
	*pthread = ugdb_find_thread(ugdb, pid, tid);
	mutex_unlock(&ugdb->u_mutex);

	if (*pthread)
		return "OK";

err:
	return "E01";
}

static const char *handle_ck_alive(struct ugdb *ugdb, char *cmd)
{
	struct ugdb_thread *thread;
	int pid = 0, tid;

	if (!parse_pid_tid(cmd, &pid, &tid, false))
		goto err;

	mutex_lock(&ugdb->u_mutex);
	thread = ugdb_find_thread(ugdb, pid, tid);
	mutex_unlock(&ugdb->u_mutex);

	if (thread)
		return "OK";

err:
	return "E01";
}

static int parse_pid(char *str)
{
	int pid;

	if (!parse_xid(str, &pid, false))
		return 0;

	return pid;
}

static const char *handle_vattach(struct ugdb *ugdb, char *cmd)
{
	int pid = parse_pid(cmd);

	if (pid && !ugdb_attach(ugdb, pid))
		return "OK";

	return "E01";
}

static const char *handle_detach(struct ugdb *ugdb, char *cmd)
{
	int pid;

	if (*cmd++ != ';')
		goto err;

	pid = parse_pid(cmd);
	if (pid && !ugdb_detach(ugdb, pid))
		return "OK";

err:
	return "E01";
}

typedef int (*each_func_t)(struct ugdb_thread *, void *);

static int ugdb_do_each_thread(struct ugdb *ugdb, int pid, int tid,
				each_func_t func, void *arg)
{
	struct ugdb_process *process;
	struct ugdb_thread *thread;
	int ret = -ESRCH;

	list_for_each_entry(process, &ugdb->u_processes, p_processes) {
		if (unlikely(!process_alive(process)))
			continue;
		if (pid > 0 && process->p_pid != pid)
			continue;

		list_for_each_entry(thread, &process->p_threads, t_threads) {
			if (WARN_ON(!thread_alive(thread)))
				continue;
			if (tid > 0 && thread->t_tid != tid)
				continue;

			ret = func(thread, arg);
			if (ret)
				goto out;

			if (tid >= 0)
				break;
		}

		if (pid >= 0)
			break;
	}

out:
	return ret;
}

static int do_stop_thread(struct ugdb_thread *thread, void *arg)
{
	ugdb_stop_thread(thread, arg != NULL);
	return 0;
}

static int do_cont_thread(struct ugdb_thread *thread, void *arg)
{
	ugdb_cont_thread(thread, arg != NULL);
	return 0;
}

static const char *handle_vcont(struct ugdb *ugdb, char *cmd)
{
	int pid, tid;
	void *arg;
	int ret;

	switch (*cmd ++) {
	default:
		return "E01";
	case '?':
		return "vCont;t";
	case ';':
		break;
	}

	// XXX: Discuss the generic case! currently trivial.

	if (*cmd++ != 't')
		return "E01";

	pid = tid = -1;
	if (*cmd++ == ':') {
		if (!parse_pid_tid(cmd, &pid, &tid, true))
			return "E01";
	}
	arg = (tid >= 0) ? NULL : (void*)1;

	mutex_lock(&ugdb->u_mutex);
	// XXX: currently we only report -ESRCH
	ret = ugdb_do_each_thread(ugdb, pid, tid, do_stop_thread, arg);
	mutex_unlock(&ugdb->u_mutex);

	return ret < 0 ? "E01" : "OK";
}

static const char *handle_c(struct ugdb *ugdb, char *cmd)
{
	const char *rc = "E01";

	mutex_lock(&ugdb->u_mutex);
	if (ugdb->u_cur_hc)
		if (ugdb_cont_thread(ugdb->u_cur_hc, false) > 0)
			rc = "OK";
	mutex_unlock(&ugdb->u_mutex);

	return rc;
}

// -----------------------------------------------------------------------------
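/*
 * Resolve the current Hg thread to its task and spin until
 * utrace_prepare_examine() says the stopped tracee is safe to examine.
 * On success the task is returned with ->u_mutex still held;
 * ugdb_finish_examine() drops it.
 */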
static struct task_struct *
ugdb_prepare_examine(struct ugdb *ugdb, struct utrace_examiner *exam)
{
	struct ugdb_thread *thread;
	struct task_struct *task;
	int err;

	mutex_lock(&ugdb->u_mutex);
	thread = ugdb->u_cur_hg;
	if (!thread || !(thread->t_stop_state & T_STOP_STOPPED))
		goto err;

	// XXX: u_cur_hg can't exit, we hold the mutex
	task = thread_to_task(thread);

	for (;;) {
		if (fatal_signal_pending(current))
			goto err;

		err = utrace_prepare_examine(task, thread->t_engine, exam);
		if (!err)
			break;

		if (err == -ESRCH)
			goto err;

		schedule_timeout_interruptible(1);
	}

	return task;

err:
	mutex_unlock(&ugdb->u_mutex);
	return NULL;
}

// XXX: we hold the mutex in between, but only because we can't
// use get_task_struct/put_task_struct.

static int
ugdb_finish_examine(struct ugdb *ugdb, struct utrace_examiner *exam)
{
	// XXX: u_cur_hg can't exit, we hold the mutex
	struct ugdb_thread *thread = ugdb->u_cur_hg;
	struct task_struct *task = thread_to_task(thread);

	int ret = utrace_finish_examine(task, thread->t_engine, exam);

	mutex_unlock(&ugdb->u_mutex);
	return ret;
}

#define REGSET_GENERAL	0
// stolen from gdb-7.1/gdb/gdbserver/linux-x86-low.c
static int x86_64_regmap[] = {
	80, 40, 88, 96, 104, 112, 32, 152, 72, 64, 56, 48, 24, 16,
	8, 0, 128, 144, 136, 160, 184, 192, 200, 208, -1, -1, -1, -1,
	-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
	-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 120,
};

static const char *handle_getregs(struct ugdb *ugdb)
{
	struct utrace_examiner exam;
	struct task_struct *task;
	const struct user_regset_view *view;
	const struct user_regset *rset;
	struct user_regs_struct *pregs;
	int rn;

	static int pkt_size;
	if (!pkt_size) {
		int sz = 0;
		for (rn = 0; rn < ARRAY_SIZE(x86_64_regmap); ++rn) {
			int offs = x86_64_regmap[rn];
			if (offs < 0)
				continue;
			if (offs > (sizeof(*pregs) - sizeof(long))) {
				printk(KERN_INFO "XXX: x86_64_regmap is wrong!\n");
				ugdb->u_err = -EINVAL;
				goto err;
			}
			sz += sizeof(long) * 2;
		}
		pkt_size = sz;
	}

	if (pb_room(&ugdb->u_pbuf) < 4 + pkt_size + sizeof(*pregs)) {
		printk(KERN_INFO "XXX: getregs ENOMEM %d %ld\n",
					pkt_size, sizeof(*pregs));
		goto err;
	}

	pregs = pb_alloc_tmp(&ugdb->u_pbuf, sizeof(*pregs));
	BUG_ON(pregs + 1 != (void*)ugdb->u_pbuf.cur + BUFFER_SIZE);

	task = ugdb_prepare_examine(ugdb, &exam);
	if (!task)
		goto err;

	view = task_user_regset_view(task);
	rset = view->regsets + REGSET_GENERAL;

	rset->get(task, rset, 0, sizeof(*pregs), pregs, NULL);

	if (ugdb_finish_examine(ugdb, &exam))
		goto err;

	pb_start(&ugdb->u_pbuf);
	for (rn = 0; rn < ARRAY_SIZE(x86_64_regmap); ++rn) {
		int offs = x86_64_regmap[rn];
		if (offs >= 0)
			pb_putbs(&ugdb->u_pbuf, (void*)pregs + offs,
					sizeof(long));
	}

	WARN_ON(pb_room(&ugdb->u_pbuf) < sizeof(*pregs));
	pb_end(&ugdb->u_pbuf);
	return NULL;
err:
	return "E01";
}

static typeof(access_process_vm) *u_access_process_vm;

static const char *apvm(struct ugdb *ugdb, struct task_struct *task,
			unsigned long addr, int size)
{
	unsigned char *mbuf;

	mbuf = pb_alloc_bs(&ugdb->u_pbuf, size);
	if (!mbuf) {
		printk(KERN_INFO "XXX: apvm(%d) ENOMEM\n", size);
		goto err;
	}

	size = u_access_process_vm(task, addr, mbuf, size, 0);
	if (size <= 0)
		goto err;

	pb_start(&ugdb->u_pbuf);
	pb_putbs(&ugdb->u_pbuf, mbuf, size);
	pb_end(&ugdb->u_pbuf);
	return NULL;
err:
	return "E01";
}

static const char *handle_readmem(struct ugdb *ugdb, char *cmd)
{
	struct utrace_examiner exam;
	struct task_struct *task;
	unsigned long addr, size;
	const char *ret = "E01";

	if (sscanf(cmd, "m%lx,%lx", &addr, &size) != 2)
		goto out;

	task = ugdb_prepare_examine(ugdb, &exam);
	if (!task)
		goto out;

	ret = apvm(ugdb, task, addr, size);

	/* Too late to report the error */
	if (ugdb_finish_examine(ugdb, &exam))
		;
out:
	return ret;
}

// -----------------------------------------------------------------------------
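/*
 * EQ(cmd, str): if cmd starts with the string literal str, advance cmd
 * past it and evaluate to true.
 */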
#define EQ(cmd, str)					\
	(strncmp((cmd), (str), sizeof(str)-1) ?	false :	\
		((cmd) += sizeof(str)-1, true))

static void handle_command(struct ugdb *ugdb, char *cmd, int len)
{
	struct pbuf *pb = &ugdb->u_pbuf;
	const char *rc = "";

	switch (cmd[0]) {
	case '!':
	case '?':
		rc = "OK";
		break;

	case 'H':
		rc = handle_set_cur(ugdb, cmd + 1);
		break;

	case 'T':
		rc = handle_ck_alive(ugdb, cmd + 1);
		break;

	case 'D':
		rc = handle_detach(ugdb, cmd + 1);
		break;

	case 'g':
		rc = handle_getregs(ugdb);
		break;

	case 'm':
		rc = handle_readmem(ugdb, cmd);
		break;

	case 'c':
		rc = handle_c(ugdb, cmd);
		break;

	case 'q':
		if (EQ(cmd, "qSupported")) {
			if (!strstr(cmd, "multiprocess+")) {
				printk(KERN_INFO "ugdb: can't work without multiprocess\n");
				ugdb->u_err = -EPROTONOSUPPORT;
			}

			pb_packf(&ugdb->u_pbuf, "PacketSize=%x;%s",
				PACKET_SIZE,
				"QStartNoAckMode+;QNonStop+;multiprocess+");
			rc = NULL;
		}
		else if (EQ(cmd, "qfThreadInfo")) {
			rc = handle_thread_info(ugdb, true);
		}
		else if (EQ(cmd, "qsThreadInfo")) {
			rc = handle_thread_info(ugdb, false);
		}
		else if (EQ(cmd, "qTStatus")) {
			rc = "T0";
		}

		break;

	case 'Q':
		if (EQ(cmd, "QStartNoAckMode")) {
			ugdb->u_no_ack = true;
			rc = "OK";
		}
		else if (EQ(cmd, "QNonStop:")) {
			if (*cmd != '1') {
				printk(KERN_INFO "ugdb: all-stop is not implemented.\n");
				ugdb->u_err = -EPROTONOSUPPORT;
			}

			rc = "OK";
		}

		break;

	case 'v':
		if (EQ(cmd, "vAttach;")) {
			rc = handle_vattach(ugdb, cmd);
		}
		else if (EQ(cmd, "vStopped")) {
			rc = handle_vstopped(ugdb);
		}
		else if (EQ(cmd, "vCont")) {
			rc = handle_vcont(ugdb, cmd);
		}

		break;

	default:
		;
	}

	if (rc)
		pb_packs(pb, rc);
}

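/*
 * Parse the raw bytes received from gdb: '+'/'-' acks, ^C, and '$'..'#'cs
 * framed packets.  A trailing incomplete packet is kept in ->u_cbuf and
 * finished by the next write().
 */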
static void process_commands(struct ugdb *ugdb)
{
	char *cmds = ugdb->u_cbuf;
	int todo = ugdb->u_clen;

	if (o_remote_debug)
		printk(KERN_INFO "=> %.*s\n", ugdb->u_clen, ugdb->u_cbuf);

	while (todo) {
		char first;
		char *c_cmd, *c_end;
		int c_len;

		first = *cmds++;
		todo--;

		switch (first) {
		default:
			printk(KERN_INFO "XXX: unknown chr %02x\n", first);
			pb_putc(&ugdb->u_pbuf, '-');
			break;

		case '-':
			printk(KERN_INFO "XXX: got NACK!\n");
			ugdb->u_err = -EPROTO;
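			/* fall through */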
		case '+':
			break;

		case 0x3:
			printk(KERN_INFO "XXX: unexpected CTRL-C\n");
			break;

		case '$':
			c_cmd = cmds;
			c_end = strnchr(c_cmd, todo, '#');
			c_len = c_end ? c_end - cmds : -1;

			if (c_len < 0 || todo < c_len + 3) {
				printk(KERN_INFO "XXX: can't find '#cs'\n");
				++todo;
				--cmds;
				goto out;
			}

			// XXX: verify checksum ?
			todo -= c_len + 3;
			cmds += c_len + 3;
			*c_end = 0;

			if (!ugdb->u_no_ack)
				pb_putc(&ugdb->u_pbuf, '+');

			handle_command(ugdb, c_cmd, c_len);
		}
	}
out:
	ugdb->u_clen = todo;
	if (todo && cmds > ugdb->u_cbuf)
		memmove(ugdb->u_cbuf, cmds, todo);
}

// -----------------------------------------------------------------------------
static int xxx_tinfo(struct ugdb *ugdb)
{
	struct ugdb_thread *thread;
	int tid = 0;

	mutex_lock(&ugdb->u_mutex);
	thread = ugdb_advance_tinfo(ugdb);
	if (thread)
		tid = thread->t_tid;
	mutex_unlock(&ugdb->u_mutex);

	return tid;
}

static int xxx_sc_threads(struct ugdb *ugdb, int tid, bool sc)
{
	void *arg = NULL;
	int pid = 0;
	int ret;

	if (tid < 0) {
		pid = -tid;
		tid = -1;
		arg = (void*)1;
	}

	mutex_lock(&ugdb->u_mutex);
	ret = ugdb_do_each_thread(ugdb, pid, tid,
				sc ? do_stop_thread : do_cont_thread,
				arg);
	mutex_unlock(&ugdb->u_mutex);

	return ret;
}

static int xxx_stop(struct ugdb *ugdb, int tid)
{
	return xxx_sc_threads(ugdb, tid, true);
}

static int xxx_cont(struct ugdb *ugdb, int tid)
{
	return xxx_sc_threads(ugdb, tid, false);
}

static int xxx_get_stopped(struct ugdb *ugdb)
{
	struct ugdb_thread *thread;
	int tid = 1;

	if (ugdb->u_stop_state == U_STOP_IDLE)
		return -1;

	if (ugdb->u_stop_state == U_STOP_PENDING)
		tid = 1000;

	thread = ugdb_next_stopped(ugdb);
	if (!thread)
		return 0;
	return tid * thread->t_tid;
}

static long ugdb_f_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct ugdb *ugdb = file->private_data;
	// XXX: otherwise gdb->get_tty_state(TCGETS, TCSETS, TCFLSH) complains
	int ret = 0;

	// XXX: temporary debugging hooks, ignore.
	switch (cmd) {
		case 0x666 + 0:
			ret = ugdb_attach(ugdb, arg);
			break;

		case 0x666 + 1:
			ret = ugdb_detach(ugdb, arg);
			break;

		case 0x666 + 2:
			ret = xxx_tinfo(ugdb);
			break;

		case 0x666 + 3:
			ret = xxx_stop(ugdb, arg);
			break;

		case 0x666 + 4:
			ret = xxx_cont(ugdb, arg);
			break;

		case 0x666 + 5:
			ret = xxx_get_stopped(ugdb);
			break;
	}

	return ret;
}

static unsigned int ugdb_f_poll(struct file *file, poll_table *wait)
{
	struct ugdb *ugdb = file->private_data;
	unsigned int mask;

	poll_wait(file, &ugdb->u_wait, wait);

	mask = (POLLOUT | POLLWRNORM);

	if (pb_size(&ugdb->u_pbuf) || ugdb->u_stop_state == U_STOP_PENDING)
		mask |= (POLLIN | POLLRDNORM);

	if (ugdb->u_err)
		mask |= POLLERR;

	return mask;
}

static ssize_t ugdb_f_read(struct file *file, char __user *ubuf,
				size_t count, loff_t *ppos)
{
	struct ugdb *ugdb = file->private_data;
	struct pbuf *pb = &ugdb->u_pbuf;

	if (ugdb->u_err)
		return ugdb->u_err;

	if (ugdb->u_stop_state == U_STOP_PENDING)
		ugdb_report_stopped(ugdb, true);

	if (pb_size(pb) > count) {
		printk(KERN_INFO "XXX: short read %d %ld\n",
					pb_size(pb), count);
	}

	count = pb_copy_to_user(pb, ubuf, count);
	if (count > 0)
		*ppos += count;
	return count;
}

static ssize_t ugdb_f_write(struct file *file, const char __user *ubuf,
				size_t count, loff_t *ppos)
{
	struct ugdb *ugdb = file->private_data;

	if (ugdb->u_err)
		return ugdb->u_err;

	if (count > PACKET_SIZE - ugdb->u_clen) {
		count = PACKET_SIZE - ugdb->u_clen;
		printk("XXX: write(%ld,%d) enospc\n", count, ugdb->u_clen);
	}
	if (copy_from_user(ugdb->u_cbuf + ugdb->u_clen, ubuf, count))
		return -EFAULT;

	ugdb->u_clen += count;
	process_commands(ugdb);

	*ppos += count;
	return count;
}

static int ugdb_f_open(struct inode *inode, struct file *file)
{
	nonseekable_open(inode, file);

	file->private_data = ugdb_create();

	return	IS_ERR(file->private_data) ?
		PTR_ERR(file->private_data) : 0;
}

static int ugdb_f_release(struct inode *inode, struct file *file)
{
	ugdb_destroy(file->private_data);

	return 0;
}

static const struct file_operations ugdb_f_ops = {
	.open			= ugdb_f_open,
	.unlocked_ioctl		= ugdb_f_ioctl,
	.poll			= ugdb_f_poll,
	.read			= ugdb_f_read,
	.write			= ugdb_f_write,
	.release		= ugdb_f_release,
};

#include <linux/kallsyms.h>

struct kallsyms_sym {
	const char	*name;
	unsigned long	addr;
};

static int kallsyms_on_each_symbol_cb(void *data, const char *name,
				struct module *mod, unsigned long addr)
{
	struct kallsyms_sym *sym = data;

	if (strcmp(name, sym->name))
		return 0;

	sym->addr = addr;
	return 1;
}

// XXX: kallsyms_lookup_name() is not exported in 2.6.32
static bool lookup_unexported(void)
{
	struct kallsyms_sym sym;

	sym.name = "access_process_vm";
	if (!kallsyms_on_each_symbol(kallsyms_on_each_symbol_cb, &sym))
		goto err;
	u_access_process_vm = (void*)sym.addr;

	return true;
err:
	printk(KERN_ERR "ugdb: can't lookup %s\n", sym.name);
	return false;
}

#define PROC_NAME	"ugdb"
static struct proc_dir_entry *ugdb_pde;

static int __init ugdb_init(void)
{
	if (!lookup_unexported())
		return -ESRCH;

	ugdb_pde = proc_create(PROC_NAME, S_IFREG|S_IRUGO|S_IWUGO,
				NULL, &ugdb_f_ops);
	if (!ugdb_pde)
		return -EBADF;

	return 0;
}

static void __exit ugdb_exit(void)
{
	remove_proc_entry(PROC_NAME, NULL);
}

MODULE_LICENSE("GPL");
module_init(ugdb_init);
module_exit(ugdb_exit);


* Re: gdbstub initial code, v5
  2010-08-20 17:41 gdbstub initial code, v5 Oleg Nesterov
@ 2010-08-23 18:55 ` Oleg Nesterov
  2010-08-24 17:10   ` Oleg Nesterov
  2010-08-24 23:36 ` Roland McGrath
  1 sibling, 1 reply; 4+ messages in thread
From: Oleg Nesterov @ 2010-08-23 18:55 UTC (permalink / raw)
  To: archer, utrace-devel

Just a small report to explain what I am doing...

On 08/20, Oleg Nesterov wrote:
>
> 	- I forgot to implement attaching to a thread group with
> 	  a dead leader. Next time.

Almost done, but we need to avoid the races with exec somehow.
That is minor, though.

I tried to test this code as much as I could. Again, I do not use
gdb at all; I am using scripts which try to really stress ugdb.

I found 2 bugs in ugdb.ko; the second one is not nice, but at least
I have a temporary fix.

However. I spent all Monday trying to resolve the new bug, and
so far I do not understand what happens. It is extremely hard to
reproduce, and the kernel just hangs silently, without any message.
So far I suspect the problem is in utrace.c, but this time I am not sure.

Will continue tomorrow...

Oleg.


* Re: gdbstub initial code, v5
  2010-08-23 18:55 ` Oleg Nesterov
@ 2010-08-24 17:10   ` Oleg Nesterov
  0 siblings, 0 replies; 4+ messages in thread
From: Oleg Nesterov @ 2010-08-24 17:10 UTC (permalink / raw)
  To: archer, utrace-devel

On 08/23, Oleg Nesterov wrote:
>
> However. I spent all Monday trying to resolve the new bug, and
> so far I do not understand what happens. It is extremely hard to
> reproduce, and the kernel just hangs silently, without any message.
> So far I suspect the problem is in utrace.c, but this time I am not sure.

Solved. This was a scheduler bug fixed in 2.6.35, but I was using 2.6.34.
This is really funny: this bug (the PF_STARTING lockup) was found and
fixed by me & Peter.

Oh, but I hit yet another problem: a BUG_ON() in __utrace_engine_release().
Again, it is not reproducible; I saw it only once in dmesg, and I do
not even know for sure what I was doing.

I'll continue tomorrow, but if I can't quickly resolve this problem
I am going to ignore it for now. This time I think ugdb is wrong.

Oleg.


* Re: gdbstub initial code, v5
  2010-08-20 17:41 gdbstub initial code, v5 Oleg Nesterov
  2010-08-23 18:55 ` Oleg Nesterov
@ 2010-08-24 23:36 ` Roland McGrath
  1 sibling, 0 replies; 4+ messages in thread
From: Roland McGrath @ 2010-08-24 23:36 UTC (permalink / raw)
  To: Oleg Nesterov; +Cc: archer, utrace-devel

> When the main thread exits, gdbserver still exposes it to gdb as
> a running process. It is visible via "info threads", you can switch
> to this thread, and $Tp or $Hx result in "OK" as if this thread were alive.
> gdbserver even pretends that $vCont;x:DEAD_THREAD works, although
> this thread obviously can never report anything.

This is sort of consistent with the kernel treatment.  The main thread
stays around as a zombie, acting as a moniker for the whole process.  But
indeed that is not actually useful for any thread-granularity control or
information (well, there is the dead thread's usage stats, but that's all).

> I don't think this is really right. It just confuses the user, and
> imho it should be considered a minor bug.

I tend to agree, but don't think it's a big issue either way, really.

> ugdb doesn't do this. If the main thread exits, it exits like any
> other thread. I played with gdb, and it seems to handle this case fine.

Sounds good to me!

> 	- The exit code (Wxx) can be wrong in the mt case.
> 
> 	  The problem is, ->report_death can't safely access
> 	  ->group_exit_code with kernel < 2.6.35. This is
> 	  solvable.

Don't even worry about it.  If there is something trivial to do that makes
it better for earlier kernels, then go ahead.  But if the easy thing to do
gives correct results on >=2.6.35 and racily wrong or random results on
older kernels, then we can just live with that.


Thanks,
Roland
