public inbox for archer@sourceware.org
* gdbstub initial code, v16
@ 2010-11-15 19:12 Oleg Nesterov
  2010-11-16 20:02 ` Roland McGrath
       [not found] ` <20110215204148.GA17258@host1.dyn.jankratochvil.net>
  0 siblings, 2 replies; 4+ messages in thread
From: Oleg Nesterov @ 2010-11-15 19:12 UTC (permalink / raw)
  To: archer, utrace-devel

[-- Attachment #1: Type: text/plain, Size: 1166 bytes --]

The only change is hardware watchpoints.

Well. I can't say this change is good, because ugdb uses the (unexported)
arch_ptrace() to set the debug registers in a very x86-specific way.
However, I do not see what else I can do.

2.6.32 doesn't have the generic hardware breakpoint interface. And I
can't play with thread.debugregX by hand, otherwise ugdb can't be
compiled against fresh kernels.

The arch_ptrace() approach should work (I hope) with any kernel. But this
obviously means more multitracing problems. Perhaps ugdb needs a
"can_use_hw_watchpoints" parameter.

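Something like this, mirroring the existing "echo" parameter (just a
sketch, the name and the default are arbitrary):

	static int o_can_use_hw_wp = 1;
	module_param_named(can_use_hw_watchpoints, o_can_use_hw_wp, bool, 0644);

Then x86_update_hw_wp() and friends could simply do nothing when it is
cleared.
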
Also, the current usage of the debug registers is far from optimal;
hopefully it is simple to improve.


And I think it makes sense to change gdb somehow. Even when it works
with gdbserver, it falls back to single-stepping if the watchpoint is
too big for the hardware (default_region_ok_for_hw_watchpoint). This
means that ugdb can't be faster in this case, although it obviously
could be.


What should I do next? (apart from internal changes, of course)

Say, should I implement vRun? From the very beginning I have hated the
idea of exec'ing the target from kernel space, but OTOH I'm afraid this
is important for gdb users.

Oleg.


[-- Attachment #2: ugdb.c --]
[-- Type: text/plain, Size: 75629 bytes --]

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/utrace.h>
#include <linux/kernel.h>
#include <linux/poll.h>
#include <linux/mm.h>
#include <linux/regset.h>
#include <asm/uaccess.h>

static int o_remote_debug;
module_param_named(echo, o_remote_debug, bool, 0);

#define BUFFER_SIZE		1152
#define PACKET_SIZE		1152

struct pbuf {
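	/*
	 * Outgoing packet buffer. cur is the next free byte; pkt points
	 * just past the leading '$'/'%' of the packet being built and is
	 * NULL when no packet is open.
	 */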
	char	*cur, *pkt;
	char	buf[BUFFER_SIZE];
};

static inline void pb_init(struct pbuf *pb)
{
	pb->cur = pb->buf;
	pb->pkt = NULL;
}

enum {
	U_STOP_IDLE = 0,
	U_STOP_PENDING,
	U_STOP_SENT,
};

/* wrap-safe generation counter comparison, like time_after() */
#define u_after(a, b)	((long)(b) - (long)(a) < 0)

struct ugdb {
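	/*
	 * One gdbstub instance: the list of attached processes, the queue
	 * of threads with not-yet-reported stop events, and the outgoing
	 * packet buffer all live here.
	 */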
	struct list_head	u_processes;
	struct list_head	u_stopped;

	int			u_stop_state;

	struct mutex		u_mutex;
	spinlock_t		u_slock;

	struct ugdb_thread
				*u_cur_tinfo,
				*u_cur_hg,
				*u_cur_hc;

	unsigned long		u_genctr;

	wait_queue_head_t	u_wait;

	int			u_err;

	struct pbuf		u_pbuf;
	char			u_cbuf[PACKET_SIZE];
	int			u_clen;

	sigset_t		u_sig_ign;

	unsigned int
				u_no_ack:1,
				u_allstop:1;
};

static inline void ugdb_ck_stopped(struct ugdb *ugdb)
{
	spin_lock(&ugdb->u_slock);
	WARN_ON(!list_empty(&ugdb->u_stopped) &&
				ugdb->u_stop_state == U_STOP_IDLE);
	WARN_ON(list_empty(&ugdb->u_stopped) &&
				ugdb->u_stop_state == U_STOP_PENDING);
	spin_unlock(&ugdb->u_slock);
}

static struct ugdb *ugdb_create(void)
{
	struct ugdb *ugdb;
	int err;

	err = -ENODEV;
	// XXX: ugly. proc_reg_open() should take care.
	if (!try_module_get(THIS_MODULE))
		goto out;

	err = -ENOMEM;
	ugdb = kzalloc(sizeof(*ugdb), GFP_KERNEL);
	if (!ugdb)
		goto put_module;

	INIT_LIST_HEAD(&ugdb->u_processes);
	INIT_LIST_HEAD(&ugdb->u_stopped);

	mutex_init(&ugdb->u_mutex);
	spin_lock_init(&ugdb->u_slock);

	init_waitqueue_head(&ugdb->u_wait);

	pb_init(&ugdb->u_pbuf);

	return ugdb;

put_module:
	module_put(THIS_MODULE);
out:
	return ERR_PTR(err);
}

struct x86_hw_wp {
	int		nr;
	unsigned long	dr[4];
	unsigned long	dr7;
};

struct p_wp {
	struct rw_semaphore	wp_sem;
	struct list_head	wp_list;
	int			wp_cnt, wp_mem;

	struct x86_hw_wp	hw_wp;
};

static inline void p_wp_init(struct p_wp *p_wp)
{
	init_rwsem(&p_wp->wp_sem);
	INIT_LIST_HEAD(&p_wp->wp_list);
}

static void p_wp_free(struct p_wp *p_wp);

#define P_DETACHING	(1 << 1)
#define P_ZOMBIE	(1 << 2)

struct ugdb_process {
	int			p_pid;
	int			p_state;

	struct p_wp		p_wp;

	struct list_head	p_threads;

	struct ugdb		*p_ugdb;
	struct list_head	p_processes;
};

static inline bool process_alive(struct ugdb_process *process)
{
	return !(process->p_state & P_ZOMBIE);
}

static inline void mark_process_dead(struct ugdb_process *process)
{
	process->p_state |= P_ZOMBIE;
}

static struct ugdb_process *ugdb_create_process(struct ugdb *ugdb, int pid_nr)
{
	struct ugdb_process *process;

	process = kzalloc(sizeof(*process), GFP_KERNEL);
	if (!process)
		return NULL;

	process->p_pid = pid_nr;
	process->p_ugdb = ugdb;
	INIT_LIST_HEAD(&process->p_threads);
	list_add_tail(&process->p_processes, &ugdb->u_processes);

	p_wp_init(&process->p_wp);

	return process;
}

#define T_STOP_RUN	0
#define T_STOP_REQ	(1 << 0)	/* requested by gdb */
#define T_STOP_ALL	(1 << 1)	/* vCont;c:pX.-1, for report_clone */
#define T_STOP_ACK	(1 << 2)	/* visible to vStopped */
#define T_STOP_STOPPED	(1 << 3)	/* reported as stopped to gdb */
					/* TASK_TRACED + deactivated ? */

#define T_EV_NONE		0
#define T_EV_EXIT		(1 << 24)
#define T_EV_SIGN		(2 << 24)

#define T_EV_TYPE(event)	((0xff << 24) & (event))
#define T_EV_DATA(event)	(~(0xff << 24) & (event))
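/*
 * t_stop_event encoding: the event type lives in the top byte, the
 * payload (exit code for T_EV_EXIT, signal number for T_EV_SIGN) in
 * the low 24 bits.
 */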

#define HW_WP_NONE	0
#define HW_WP_ACTIVE	1
#define HW_WP_INVALID	2

struct t_wp {
	struct wp		*wp_cur;

	int		hw_wp_state;
};

static inline void t_wp_init(struct ugdb_thread *thread)
{
}

#define T_STEP_HARD	1
#define T_STEP_SOFT	2

struct ugdb_thread {
	int			t_tid;
	int			t_stop_state;
	int			t_stop_event;
	int			t_step; 	// XXX: move me into t_stop_event

	struct t_wp		t_wp;

	siginfo_t		*t_siginfo;

	struct ugdb		*t_ugdb;
	struct ugdb_process	*t_process;

	unsigned long		t_genctr;

	struct list_head	t_threads;
	struct list_head	t_stopped;

	struct pid		*t_spid;

	struct utrace_engine	*t_engine;
};

static inline bool thread_alive(struct ugdb_thread *thread)
{
	WARN_ON((thread->t_tid != 0) != process_alive(thread->t_process));

	return thread->t_tid != 0;
}

static inline void mark_thread_dead(struct ugdb_thread *thread)
{
	mark_process_dead(thread->t_process);
	thread->t_tid = 0;
}

/*
 * The thread should be alive, and it can't pass ugdb_report_death()
 * if the caller holds ugdb->u_mutex. However the tracee can be
 * reaped anyway, pid_task() can return NULL after detach_pid().
 */
static inline struct task_struct *thread_to_task(struct ugdb_thread *thread)
{
	BUG_ON(!thread_alive(thread));

	return pid_task(thread->t_spid, PIDTYPE_PID);
}

static struct ugdb_thread *ugdb_create_thread(struct ugdb_process *process,
						struct pid *spid)
{
	struct ugdb_thread *thread;

	thread = kzalloc(sizeof(*thread), GFP_KERNEL);
	if (!thread)
		return NULL;

	thread->t_tid = pid_vnr(spid);
	thread->t_spid = get_pid(spid);
	thread->t_process = process;
	thread->t_ugdb = process->p_ugdb;
	thread->t_genctr = thread->t_ugdb->u_genctr;
	INIT_LIST_HEAD(&thread->t_stopped);
	list_add_tail(&thread->t_threads, &process->p_threads);

	t_wp_init(thread);

	return thread;
}

static void ugdb_del_stopped(struct ugdb *ugdb, struct ugdb_thread *thread)
{
	if (!list_empty(&thread->t_stopped)) {
		WARN_ON(!(thread->t_stop_state & T_STOP_ACK));

		spin_lock(&ugdb->u_slock);
		list_del_init(&thread->t_stopped);

		if (!(thread->t_stop_state & T_STOP_STOPPED)) {
			if (ugdb->u_stop_state == U_STOP_PENDING &&
					list_empty(&ugdb->u_stopped))
				ugdb->u_stop_state = U_STOP_IDLE;
		}
		spin_unlock(&ugdb->u_slock);
	}

	thread->t_stop_state = T_STOP_RUN;
}

static void ugdb_destroy_thread(struct ugdb_thread *thread)
{
	struct ugdb *ugdb = thread->t_ugdb;

	ugdb_ck_stopped(ugdb);

	ugdb_del_stopped(ugdb, thread);

	/* NULL if attach fails */
	if (thread->t_engine)
		utrace_engine_put(thread->t_engine);

	list_del(&thread->t_threads);
	put_pid(thread->t_spid);
	kfree(thread);
}

static int ugdb_set_events(struct ugdb_thread *thread,
				unsigned long events)
{
	WARN_ON(!thread_alive(thread));

	events |= (UTRACE_EVENT(CLONE) | UTRACE_EVENT(DEATH) |
			UTRACE_EVENT_SIGNAL_ALL);

	//XXX: I think utrace_get_signal() is buggy !!!!!!!!!!!!
	events |= UTRACE_EVENT(QUIESCE);

	return utrace_set_events_pid(thread->t_spid, thread->t_engine,
					events);
}

static int ugdb_control(struct ugdb_thread *thread,
				enum utrace_resume_action action)
{
	// XXX: temporary racy check
	WARN_ON(!thread_alive(thread) && action != UTRACE_DETACH);

	return utrace_control_pid(thread->t_spid, thread->t_engine,
					action);
}

static const struct utrace_engine_ops ugdb_utrace_ops;

static void ugdb_detach_thread(struct ugdb_thread *thread, bool running)
{
	int ret;

	ret = ugdb_control(thread, UTRACE_DETACH);

	/* engine->flags == 0, it can't run a callback */
	if (!running)
		return;

	/* callbacks are no longer possible */
	if (!ret)
		return;

	if (ret == -EINPROGRESS) {
		/*
		 * We raced with a callback, it can't be ->report_death().
		 * However, we can not use utrace_barrier_pid(), it can
		 * hang "forever" until the next utrace_resume() if it
		 * sees ->ops == &utrace_detached_ops set by us, but the
		 * tracee is no longer running.
		 *
		 * But: we have no choice.
		 */
		do {
			ret = utrace_barrier_pid(thread->t_spid,
						thread->t_engine);
		} while (ret == -ERESTARTSYS);
	} else {
		/*
		 * Nothing can help us to synchronize with ->report_death.
		 * We do not know if it was already called or not, we can't
		 * know if it is running. utrace_barrier_pid() can't help,
		 * it can return zero and then later ->report_death() will
		 * be called. Or it can return -ESRCH just because the task
		 * was already released and pid_task() == NULL, but this
		 * doesn't mean ->report_death() can't be called later.
		 *
		 * Fortunately, we know that the tracee is dying or dead,
		 * engine->ops should be changed after ugdb_report_death()
		 * returns UTRACE_DETACH.
		 */
		while (thread->t_engine->ops == &ugdb_utrace_ops)
			schedule_timeout_uninterruptible(1);
	}
}

/*
 * returns NULL if raced with exit(), or ERR_PTR().
 */
static struct ugdb_thread *ugdb_attach_thread(struct ugdb_process *process,
						struct pid *spid)
{
	struct ugdb_thread *thread;
	struct utrace_engine *engine;
	struct task_struct *task;

	thread = ugdb_create_thread(process, spid);
	if (!thread)
		goto err;

	engine = utrace_attach_pid(thread->t_spid, UTRACE_ATTACH_CREATE,
					&ugdb_utrace_ops, thread);
	if (IS_ERR(engine))
		goto free_thread;

	thread->t_engine = engine;

	if (ugdb_set_events(thread, 0))
		goto detach_thread;

	return thread;

detach_thread:
	ugdb_detach_thread(thread, false);
free_thread:
	ugdb_destroy_thread(thread);
err:
	rcu_read_lock();
	task = pid_task(spid, PIDTYPE_PID);
	if (task && task->exit_state)
		task = NULL;
	rcu_read_unlock();

	return task ? ERR_PTR(-ENOMEM) : NULL;
}

static inline bool is_subthread(struct ugdb_process *process,
				struct ugdb_thread *thread)
{
	return thread && thread->t_process == process;
}

static inline void ugdb_reset_tinfo(struct ugdb *ugdb)
{
	ugdb->u_cur_tinfo = NULL;
}

static void ugdb_destroy_process(struct ugdb_process *process)
{
	struct ugdb *ugdb = process->p_ugdb;
	struct ugdb_thread *thread;

	mutex_lock(&ugdb->u_mutex);
	process->p_state |= P_DETACHING;
	list_del(&process->p_processes);

	if (is_subthread(process, ugdb->u_cur_hg))
		ugdb->u_cur_hg = NULL;
	if (is_subthread(process, ugdb->u_cur_hc))
		ugdb->u_cur_hc = NULL;

	/* I hope gdb won't do detach from under qfThreadInfo */
	if (ugdb->u_cur_tinfo) {
		printk(KERN_WARNING "ugdb: detach from under qfThreadInfo\n");
		ugdb_reset_tinfo(ugdb);
	}
	mutex_unlock(&ugdb->u_mutex);

	while (!list_empty(&process->p_threads)) {
		thread = list_first_entry(&process->p_threads,
				struct ugdb_thread, t_threads);
		ugdb_detach_thread(thread, true);
		ugdb_destroy_thread(thread);
	}

	BUG_ON(!list_empty(&process->p_threads));

	p_wp_free(&process->p_wp);

	kfree(process);
}

static void ugdb_destroy(struct ugdb *ugdb)
{
	struct ugdb_process *process;

	while (!list_empty(&ugdb->u_processes)) {
		process = list_first_entry(&ugdb->u_processes,
				struct ugdb_process, p_processes);
		ugdb_destroy_process(process);
	}

	BUG_ON(!list_empty(&ugdb->u_processes));
	BUG_ON(!list_empty(&ugdb->u_stopped));

	module_put(THIS_MODULE);
	kfree(ugdb);
}

static struct pid *get_next_pid(struct pid *main, struct pid *curr)
{
	struct task_struct *task;
	struct sighand_struct *sighand;
	struct pid *next = NULL;

	rcu_read_lock();
	/*
	 * If task/sighand is NULL we return NULL. This is fine if
	 * the caller is get_first_pid(), we should abort attaching.
	 *
	 * But this can also happen if curr was already attached,
	 * and this is wrong. Fortunately, this is a very unlikely
	 * case. The attached sub-thread can't pass ->report_death;
	 * if it was reaped, the caller of release_task() must be
	 * the ptracer who re-parented this thread.
	 */
	task = pid_task(curr, PIDTYPE_PID);
	if (!task)
		goto unlock_rcu;

	// XXX: we need lock_task_sighand() but it is not exported,
	// so we can race with de_thread().
	sighand = rcu_dereference(task->sighand);
	if (!sighand)
		goto unlock_rcu;

	spin_lock_irq(&sighand->siglock);
	for (;;) {
		task = next_thread(task);

		// XXX: if main is not leader we can race with exec.
		if (task_pid(task) == main)
			break;

		if (!task->exit_state) {
			next = get_pid(task_pid(task));
			break;
		}
	}
	spin_unlock_irq(&sighand->siglock);
unlock_rcu:
	rcu_read_unlock();

	return next;
}

static struct pid *get_first_pid(struct pid *main)
{
	struct task_struct *leader;

	rcu_read_lock();
	leader = pid_task(main, PIDTYPE_PID);
	if (leader && leader->exit_state)
		leader = NULL;
	rcu_read_unlock();

	/*
	 * The group-leader is alive, try to attach. If it exits
	 * before utrace_set_events(), get_first_pid() will be
	 * called again and it will notice ->exit_state != 0.
	 */
	if (leader)
		return get_pid(main);

	/*
	 * Try to find the live sub-thread. If the whole group
	 * is dead it returns NULL and the caller aborts.
	 */
	return get_next_pid(main, main);
}

static int ugdb_attach_all_threads(struct ugdb *ugdb,
				struct ugdb_process *process,
				struct pid *main_pid)
{
	struct ugdb_thread *thread;
	struct pid *curr_pid;

	mutex_lock(&ugdb->u_mutex);

	for (;;) {
		curr_pid = get_first_pid(main_pid);
		if (!curr_pid)
			goto abort;

		thread = ugdb_attach_thread(process, curr_pid);
		put_pid(curr_pid);

		if (IS_ERR(thread))
			goto abort;

		if (thread)
			break;
	}

	for (;;) {
		struct pid *next_pid;

		next_pid = get_next_pid(main_pid, curr_pid);
		if (!next_pid)
			break;

		thread = ugdb_attach_thread(process, next_pid);
		put_pid(next_pid);

		if (IS_ERR(thread))
			goto abort;

		if (!thread)
			continue;

		curr_pid = next_pid;
	}

	mutex_unlock(&ugdb->u_mutex);
	return 0;

abort:
	mutex_unlock(&ugdb->u_mutex);
	return -1;
}

static int ugdb_attach(struct ugdb *ugdb, int pid_nr)
{
	struct pid *main_pid;
	struct ugdb_process *process;
	int err;

	// XXX: check if exists
	// XXX: check if group leader ?

	err = -ESRCH;
	main_pid = find_get_pid(pid_nr);
	if (!main_pid)
		goto out;

	err = -ENOMEM;
	process = ugdb_create_process(ugdb, pid_nr);
	if (!process)
		goto free_pid;

	err = ugdb_attach_all_threads(ugdb, process, main_pid);
	if (err)
		ugdb_destroy_process(process);

free_pid:
	put_pid(main_pid);
out:
	return err;
}

static struct ugdb_process *ugdb_find_process(struct ugdb *ugdb, int pid)
{
	struct ugdb_process *process;

	list_for_each_entry(process, &ugdb->u_processes, p_processes) {
		if (process->p_pid == pid)
			return process;
	}

	return NULL;
}

static struct ugdb_thread *ugdb_find_thread(struct ugdb *ugdb, int pid, int tid)
{
	struct ugdb_process *process;
	struct ugdb_thread *thread;

	list_for_each_entry(process, &ugdb->u_processes, p_processes) {
		if (unlikely(!process_alive(process)))
			continue;
		if (pid && process->p_pid != pid)
			continue;

		list_for_each_entry(thread, &process->p_threads, t_threads) {
			if (WARN_ON(!thread_alive(thread)))
				continue;
			if (!tid || thread->t_tid == tid)
				return thread;
		}

		if (pid)
			break;
	}

	return NULL;
}

static int ugdb_detach(struct ugdb *ugdb, int pid)
{
	struct ugdb_process *process = ugdb_find_process(ugdb, pid);

	if (!process)
		return -1;

	ugdb_destroy_process(process);
	return 0;
}

#define CUR_TINFO_END	((struct ugdb_thread *)1)

static struct ugdb_thread *ugdb_advance_tinfo(struct ugdb *ugdb)
{
	struct ugdb_thread *cur, *nxt;
	struct ugdb_process *process;

	cur = ugdb->u_cur_tinfo;

	if (cur == CUR_TINFO_END) {
		ugdb->u_cur_tinfo = NULL;
		return NULL;
	}

	if (!cur) {
		list_for_each_entry(process, &ugdb->u_processes, p_processes) {
			if (unlikely(!process_alive(process)))
				continue;

			if (!list_empty(&process->p_threads)) {
				cur = list_first_entry(&process->p_threads,
						struct ugdb_thread, t_threads);
				break;
			}
		}

		if (!cur)
			return NULL;
	}

	process = cur->t_process;

	if (list_is_last(&cur->t_threads, &process->p_threads)) {
		nxt = CUR_TINFO_END;

		list_for_each_entry_continue(process, &ugdb->u_processes, p_processes) {
			if (unlikely(!process_alive(process)))
				continue;

			if (!list_empty(&process->p_threads)) {
				nxt = list_first_entry(&process->p_threads,
						struct ugdb_thread, t_threads);
				break;
			}
		}
	} else {
		nxt = list_first_entry(&cur->t_threads,
				struct ugdb_thread, t_threads);
	}

	ugdb->u_cur_tinfo = nxt;
	return cur;
}

// -----------------------------------------------------------------------------
static bool ugdb_add_stopped(struct ugdb_thread *thread, int stop_event)
{
	struct ugdb *ugdb = thread->t_ugdb;
	bool ret = false;
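	/*
	 * Queue this thread's stop event on ugdb->u_stopped so it can be
	 * reported via %Stop/vStopped, and wake up ->u_wait. Returns false
	 * if the event can not be reported, e.g. the thread was already
	 * reported as stopped to gdb.
	 */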

	ugdb_ck_stopped(ugdb);

	spin_lock(&ugdb->u_slock);
	if (stop_event == T_EV_NONE) {
		if (WARN_ON(thread->t_stop_state & T_STOP_ACK))
			goto unlock;
		if (WARN_ON(!list_empty(&thread->t_stopped)))
			goto unlock;

		/* raced with ugdb_cont_thread() */
		if (!(thread->t_stop_state & T_STOP_REQ))
			goto unlock;
	}

	if (thread->t_stop_state & T_STOP_ACK) {
		if (thread->t_stop_state & T_STOP_STOPPED)
			/*
			 * Alas, we can't report this event. We already
			 * reported T00 and there is no way to inform gdb that
			 * the state of the tracee has changed.
			 */
			goto unlock;
	} else {
		WARN_ON(thread->t_stop_state & T_STOP_STOPPED);

		thread->t_stop_state |= T_STOP_ACK;
		list_add_tail(&thread->t_stopped, &ugdb->u_stopped);

		if (ugdb->u_stop_state == U_STOP_IDLE) {
			ugdb->u_stop_state = U_STOP_PENDING;
			wake_up_all(&ugdb->u_wait);
		}
	}

	thread->t_stop_event = stop_event;
	ret = true;
unlock:
	spin_unlock(&ugdb->u_slock);

	return ret;
}

static void ugdb_process_exit(struct ugdb_thread *thread)
{
	struct ugdb *ugdb = thread->t_ugdb;
	int status;

	BUG_ON(!thread_alive(thread));

	ugdb_del_stopped(ugdb, thread);
	mark_thread_dead(thread);

	// XXX: OOPS, we can't read ->signal->group_exit_code !!!
	status = current->exit_code;
	if (ugdb_add_stopped(thread, T_EV_EXIT | status))
		return;

	WARN_ON(1);
}

static int ugdb_stop_thread(struct ugdb_thread *thread, bool all)
{
	struct ugdb *ugdb = thread->t_ugdb;
	int err;

	WARN_ON(!thread_alive(thread));

	ugdb_ck_stopped(ugdb);

	if (thread->t_stop_state != T_STOP_RUN) {
		if (!all || (thread->t_stop_state & T_STOP_ALL))
			return 0;
		/*
		 * Otherwise we should set T_STOP_ALL anyway,
		 *
		 *	(gdb) interrupt &
		 *	(gdb) interrupt -a &
		 *
		 * to ensure -a actually works if it races with clone.
		 */
	}

	err = -EALREADY;
	spin_lock(&ugdb->u_slock);
	if (thread->t_stop_state == T_STOP_RUN) {
		thread->t_stop_state = T_STOP_REQ;
		err = 0;
	}

	/*
	 * We hold ugdb->u_mutex, we can't race with ugdb_report_clone().
	 * ugdb->u_slock protects us against ugdb_add_stopped(). We can
	 * change ->t_stop_state even if we did not initiate this stop.
	 */
	if (all)
		thread->t_stop_state |= T_STOP_ALL;
	spin_unlock(&ugdb->u_slock);

	if (err)
		return 0;

	// XXX: we don't do UTRACE_STOP! this means we can't
	// stop a TASK_STOPPED task. Need to discuss jctl issues.
	// if we do UTRACE_STOP we should call ugdb_add_stopped().

	ugdb_set_events(thread, UTRACE_EVENT(QUIESCE));
	err = ugdb_control(thread, UTRACE_INTERRUPT);
	if (err && err != -EINPROGRESS)
		return err;
	return 1;
}

static int ugdb_cont_thread(struct ugdb_thread *thread, bool all, int t_step)
{
	struct ugdb *ugdb = thread->t_ugdb;
	int ret;

	WARN_ON(!thread_alive(thread));

	ugdb_ck_stopped(ugdb);

	// XXX: gdb shouldn't explicitly cont an unreported thread
	WARN_ON(!all && !(thread->t_stop_state & T_STOP_STOPPED));

	if (thread->t_stop_state == T_STOP_RUN)
		return 0;

	spin_lock(&ugdb->u_slock);
	/*
	 * Nothing to do except clear the pending T_STOP_REQ.
	 */
	ret = 0;
	if (!(thread->t_stop_state & T_STOP_ACK))
		goto set_run;

	/*
	 * Alas. Thanks to the remote protocol, we can't cont this
	 * thread. We probably already sent the notification, we
	 * can do nothing except ack that %Stop later in response
	 * to vStopped.
	 *
	 * OTOH, gdb shouldn't send 'c' if this thread was not
	 * reported as stopped. However, this means that gdb can
	 * see the new %Stop:T00 notification after vCont;c:pX.-1,
	 * it should handle this case correctly anyway. I hope.
	 *
	 * If this stop was not initiated by gdb we should not
	 * cancel it too, this event should be reported first.
	 */
	ret = -1;
	if (!(thread->t_stop_state & T_STOP_STOPPED))
		goto unlock;

	ret = 1;
	list_del_init(&thread->t_stopped);
set_run:
	thread->t_stop_state = T_STOP_RUN;
unlock:
	spin_unlock(&ugdb->u_slock);

	if (ret >= 0) {
		unsigned long events = 0;
		enum utrace_resume_action action = UTRACE_RESUME;

		thread->t_step = t_step;
		if (t_step) {
			/* to correctly step over syscall insn */
			events |= UTRACE_EVENT(SYSCALL_EXIT);
			action = UTRACE_SINGLESTEP;
		}

		// XXX: OK, this all is racy, and I do not see any
		// solution except: implement UTRACE_STOP_STICKY and
		// move this code up under the lock, or add
		// utrace_engine_ops->notify_stopped().

		// 1. UTRACE_RESUME is racy, this is fixable.
		// 2. we need utrace_barrier() to close the race
		//    with the callback which is going to return
		//    UTRACE_STOP, but:
		//	a) we can deadlock (solvable)
		//	b) in this case UTRACE_RESUME can race with
		//	   another stop initiated by tracee itself.

		ugdb_set_events(thread, events);
		ugdb_control(thread, action);
	}

	return ret;
}

static struct ugdb_thread *ugdb_next_stopped(struct ugdb *ugdb)
{
	struct ugdb_thread *thread = NULL;

	// XXX: temporary racy check
	WARN_ON(ugdb->u_stop_state == U_STOP_IDLE);

	spin_lock(&ugdb->u_slock);
	if (list_empty(&ugdb->u_stopped)) {
		ugdb->u_stop_state = U_STOP_IDLE;
	} else {
		ugdb->u_stop_state = U_STOP_SENT;

		thread = list_first_entry(&ugdb->u_stopped,
					struct ugdb_thread, t_stopped);

		thread->t_stop_state |= T_STOP_STOPPED;
		list_del_init(&thread->t_stopped);
	}
	spin_unlock(&ugdb->u_slock);

	return thread;
}

// -----------------------------------------------------------------------------
static bool ugdb_stop_pending(struct ugdb_thread *thread)
{
	if (!(thread->t_stop_state & T_STOP_REQ))
		return false;

	if (!(thread->t_stop_state & T_STOP_ACK))
		return ugdb_add_stopped(thread, T_EV_NONE);

	return true;
}

static u32 ugdb_report_quiesce(u32 action, struct utrace_engine *engine,
					unsigned long event)
{
	struct ugdb_thread *thread = engine->data;

	WARN_ON(!thread_alive(thread));

	/* ensure SIGKILL can't race with stop/cont in progress */
	if (event != UTRACE_EVENT(DEATH)) {
		if (ugdb_stop_pending(thread))
			return UTRACE_STOP;
	}

	return utrace_resume_action(action);
}

static int cont_signal(struct ugdb_thread *thread,
				struct k_sigaction *return_ka)
{
	int signr = T_EV_DATA(thread->t_stop_event);
	siginfo_t *info = thread->t_siginfo;

	thread->t_siginfo = NULL;

	if (WARN_ON(!valid_signal(signr)))
		return 0;

	if (!signr)
		return signr;
	/*
	 * Update the siginfo structure if the signal has changed.
	 */
	if (info->si_signo != signr) {
		info->si_signo = signr;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}

	/* If the (new) signal is now blocked, requeue it. */
	if (sigismember(&current->blocked, signr)) {
		send_sig_info(signr, info, current);
		signr = 0;
	} else {
		spin_lock_irq(&current->sighand->siglock);
		*return_ka = current->sighand->action[signr - 1];
		spin_unlock_irq(&current->sighand->siglock);
	}

	return signr;
}

static bool wp_next_soft_trap(struct ugdb_thread *thread, siginfo_t *info);

static u32 ugdb_report_signal(u32 action, struct utrace_engine *engine,
				struct pt_regs *regs,
				siginfo_t *info,
				const struct k_sigaction *orig_ka,
				struct k_sigaction *return_ka)
{
	struct ugdb_thread *thread = engine->data;
	struct ugdb *ugdb = thread->t_ugdb;
	int signr;

	WARN_ON(!thread_alive(thread));

	switch (utrace_signal_action(action)) {
	case UTRACE_SIGNAL_HANDLER:
		if (WARN_ON(thread->t_siginfo))
			thread->t_siginfo = NULL;

		if (thread->t_step) {
			WARN_ON(orig_ka);
			// user_single_step_siginfo(current, regs, info);
			memset(info, 0, sizeof(*info));
			info->si_signo = SIGTRAP;
			break;
		}

		/* Fall through */

	default:
		if (orig_ka)
			break;

		/*
		 * It was UTRACE_SIGNAL_REPORT, but another tracer has
		 * changed utrace_report->result to deliver or stop.
		 * Fall through.
		 */

	case UTRACE_SIGNAL_REPORT:
		if (!thread->t_siginfo) {
			/* UTRACE_INTERRUPT from ugdb_report_syscall_exit() */
			if (thread->t_step) {
				WARN_ON(orig_ka);
				// user_single_step_siginfo(current, regs, info);
				memset(info, 0, sizeof(*info));
				info->si_signo = SIGTRAP;
				break;
			}
		} else {
			if (WARN_ON(thread->t_siginfo != info))
				return action;
			WARN_ON(T_EV_TYPE(thread->t_stop_event) != T_EV_SIGN);

			signr = cont_signal(thread, return_ka);
			if (signr) {
				/*
				 * Consider:
				 *
				 *	(gdb) signal SIG &
				 *	(gdb) interrupt
				 *
				 * We shouldn't miss the new stop request, so
				 * we do not return from here.
				 */
				action = UTRACE_RESUME | UTRACE_SIGNAL_DELIVER;
			}
		}

		if (ugdb_stop_pending(thread))
			return UTRACE_STOP | utrace_signal_action(action);

		if (thread->t_step)
			return UTRACE_SINGLESTEP | utrace_signal_action(action);
		return action;
	}

	WARN_ON(thread->t_siginfo);

	signr = info->si_signo;
	if (WARN_ON(!signr || !valid_signal(signr)))
		return action;

	if (wp_next_soft_trap(thread, info)) {
		if (ugdb_stop_pending(thread))
			return UTRACE_STOP | UTRACE_SIGNAL_IGN;

		return UTRACE_SINGLESTEP | UTRACE_SIGNAL_IGN;
	}

	if (sigismember(&ugdb->u_sig_ign, signr))
		return action;

	if (ugdb_add_stopped(thread, T_EV_SIGN | signr)) {
		thread->t_siginfo = info;
		/*
		 * Make sure the subsequent UTRACE_SIGNAL_REPORT clears
		 * ->t_siginfo before return from get_signal_to_deliver().
		 */
		if (utrace_control(current, engine, UTRACE_INTERRUPT))
			WARN_ON(1);
		return UTRACE_STOP | UTRACE_SIGNAL_IGN;
	}

	/*
	 * We already reported T00 to gdb. We can't change our state,
	 * we are already stopped from gdb pov. Push back this signal
	 * to report it later, after "continue".
	 *
	 * Not multitrace-friendly.
	 */
	return UTRACE_STOP | UTRACE_SIGNAL_REPORT | UTRACE_SIGNAL_HOLD;
}

static u32 ugdb_report_syscall_exit(u32 action, struct utrace_engine *engine,
					struct pt_regs *regs)
{
	struct ugdb_thread *thread = engine->data;

	if (thread->t_step)
		return UTRACE_INTERRUPT;

	printk(KERN_INFO "ugdb: unexpected SYSCALL_EXIT\n");
	return action;
}

static bool is_already_attached(struct ugdb_process *process,
				struct task_struct *task)
{
	struct ugdb_thread *thread;

	if (likely(!task_utrace_flags(task)))
		return false;

	/*
	 * Currently there is no way to know if it was attached by us.
	 * We can't trust utrace_attach_task(UTRACE_ATTACH_MATCH_OPS),
	 * ugdb attaches without UTRACE_ATTACH_EXCLUSIVE. We have to
	 * check every attached thread.
	 *
	 * This is really bad, but without multitracing this can only
	 * happen in the unlikely case right after ugdb_attach_all_threads().
	 */
	list_for_each_entry(thread, &process->p_threads, t_threads) {
		if (thread->t_spid == task_pid(task))
			return true;
	}

	return false;
}

static void ugdb_wp_clone(struct ugdb_thread *thread);

static u32 ugdb_report_clone(u32 action, struct utrace_engine *engine,
			       unsigned long clone_flags,
			       struct task_struct *task)
{
	struct ugdb_thread *thread = engine->data;
	struct ugdb_process *process = thread->t_process;
	struct ugdb *ugdb = thread->t_ugdb;
	struct ugdb_thread *new_thread;

	WARN_ON(!thread_alive(thread));

	if (!(clone_flags & CLONE_THREAD))
		goto out;

	mutex_lock(&ugdb->u_mutex);
	if (process->p_state & P_DETACHING)
		goto unlock;
	/*
	 * This can only happen if we raced with ugdb_attach() which
	 * could attach both current and the new PF_STARTING child.
	 */
	if (unlikely(is_already_attached(process, task)))
		goto unlock;

	new_thread = ugdb_attach_thread(process, task_pid(task));
	BUG_ON(!new_thread);

	if (WARN_ON(IS_ERR(new_thread)))
		goto unlock;

	if (thread->t_stop_state & T_STOP_ALL)
		ugdb_stop_thread(new_thread, false);
	else
		ugdb_wp_clone(new_thread);

unlock:
	mutex_unlock(&ugdb->u_mutex);
out:
	return utrace_resume_action(action);
}

static u32 ugdb_report_death(struct utrace_engine *engine,
				bool group_dead, int signal)
{
	struct ugdb_thread *thread = engine->data;
	struct ugdb_process *process = thread->t_process;
	struct ugdb *ugdb = thread->t_ugdb;

	WARN_ON(!thread_alive(thread));

	mutex_lock(&ugdb->u_mutex);
	if (process->p_state & P_DETACHING)
		goto unlock;

	if (ugdb->u_cur_hg == thread)
		ugdb->u_cur_hg = NULL;
	if (ugdb->u_cur_hc == thread)
		ugdb->u_cur_hc = NULL;

	if (ugdb->u_cur_tinfo == thread)
		ugdb_advance_tinfo(ugdb);

	if (list_is_singular(&process->p_threads))
		ugdb_process_exit(thread);
	else
		ugdb_destroy_thread(thread);

unlock:
	mutex_unlock(&ugdb->u_mutex);

	return UTRACE_DETACH;
}

static const struct utrace_engine_ops ugdb_utrace_ops = {
	.report_quiesce		= ugdb_report_quiesce,
	.report_signal		= ugdb_report_signal,
	.report_syscall_exit	= ugdb_report_syscall_exit,
	.report_clone		= ugdb_report_clone,
	.report_death		= ugdb_report_death,
};

// -----------------------------------------------------------------------------
static inline int pb_size(struct pbuf *pb)
{
	return pb->cur - pb->buf;
}

static inline int pb_room(struct pbuf *pb)
{
	return pb->buf + BUFFER_SIZE - pb->cur;
}

static inline void pb_putc(struct pbuf *pb, char c)
{
	if (WARN_ON(pb->cur >= pb->buf + BUFFER_SIZE))
		return;
	*pb->cur++ = c;
}

static void pb_memcpy(struct pbuf *pb, const void *data, int size)
{
	if (WARN_ON(size > pb_room(pb)))
		return;
	memcpy(pb->cur, data, size);
	pb->cur += size;
}

static inline void pb_puts(struct pbuf *pb, const char *s)
{
	pb_memcpy(pb, s, strlen(s));
}

static inline void pb_putb(struct pbuf *pb, unsigned char val)
{
	static char hex[] = "0123456789abcdef";
	pb_putc(pb, hex[(val & 0xf0) >> 4]);
	pb_putc(pb, hex[(val & 0x0f) >> 0]);
}

static void pb_putbs(struct pbuf *pb, const char *data, int size)
{
	while (size--)
		pb_putb(pb, *data++);
}

static inline void __pb_start(struct pbuf *pb, char pref)
{
	WARN_ON(pb->pkt);
	pb_putc(pb, pref);
	pb->pkt = pb->cur;
}

static inline void pb_start(struct pbuf *pb)
{
	__pb_start(pb, '$');
}

static inline void pb_cancel(struct pbuf *pb)
{
	if (WARN_ON(!pb->pkt))
		return;

	pb->cur = pb->pkt - 1;
	pb->pkt = NULL;
}

static void pb_end(struct pbuf *pb)
{
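	/*
	 * Close the packet. Remote protocol frames are "$<data>#<csum>",
	 * where <csum> is the modulo-256 sum of the data bytes printed as
	 * two hex digits.
	 */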
	unsigned char csm = 0;
	char *pkt = pb->pkt;

	pb->pkt = NULL;
	if (WARN_ON(!pkt))
		return;

	while (pkt < pb->cur) {
		/* pb_qfer() can write '%' */
		WARN_ON(*pkt == '$' || *pkt == '#');
		csm += (unsigned char)*pkt++;
	}

	pb_putc(pb, '#');
	pb_putb(pb, csm);
}

static inline void pb_packs(struct pbuf *pb, const char *s)
{
	pb_start(pb);
	pb_puts(pb, s);
	pb_end(pb);
}

static void __attribute__ ((format(printf, 3, 4)))
__pb_format(struct pbuf *pb, bool whole_pkt, const char *fmt, ...)
{
	int room = pb_room(pb), size;
	va_list args;

	if (whole_pkt)
		pb_start(pb);

	va_start(args, fmt);
	size = vsnprintf(pb->cur, room, fmt, args);
	va_end(args);

	/* size == room means vsnprintf() truncated the output */
	if (WARN_ON(size >= room))
		return;

	pb->cur += size;

	if (whole_pkt)
		pb_end(pb);
}

#define pb_printf(pb, args...)	__pb_format((pb), false, args)
#define pb_packf(pb, args...)	__pb_format((pb), true,  args)

static int pb_qfer(struct pbuf *pb, const void *_data, int len, bool more)
{
	const unsigned char *data = _data;
	int i;
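	/*
	 * qXfer-style binary payload: 'm' means more data follows, 'l'
	 * means this is the last chunk; '$', '#', '}' and '*' are escaped
	 * as '}' followed by the character xor 0x20.
	 */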

	if (pb_room(pb) < 3 + len * 2) {
		WARN_ON(1);
		return -EOVERFLOW;
	}

	pb_start(pb);
	pb_putc(pb, more ? 'm' : 'l');

	for (i = 0; i < len; ++i) {
		unsigned char c = data[i];

		if (c == '$' || c == '#' || c == '}' || c == '*') {
			pb_putc(pb, '}');
			c ^= 0x20;
		}

		pb_putc(pb, c);
	}

	pb_end(pb);

	return 0;
}

// XXX: improve me!
static inline int pb_max_bs_size(struct pbuf *pb)
{
	int size = pb_room(pb) - 4;
	return size > 0 ? size / 2 : 0;
}
static inline void *pb_alloc_bs(struct pbuf *pb, int size)
{
	if (unlikely(pb_room(pb) < 2 * size + 4))
		return NULL;
	return pb->cur + size + 1;
}

static inline void *pb_alloc_tmp(struct pbuf *pb, int size)
{
	if (unlikely(pb_room(pb) < size))
		return NULL;
	return pb->cur + BUFFER_SIZE - size;
}

static inline void pb_flush(struct pbuf *pb, int size)
{
	int keep = pb_size(pb) - size;
	if (keep)
		memmove(pb->buf, pb->buf + size, keep);
	pb->cur -= size;
}

static int pb_copy_to_user(struct pbuf *pb, char __user *ubuf, int size)
{
	int copy = min(size, pb_size(pb));

	if (!copy)
		return -EAGAIN;

	if (o_remote_debug)
		printk(KERN_INFO "<= %.*s\n", min(copy, 64), pb->buf);

	if (copy_to_user(ubuf, pb->buf, copy))
		return -EFAULT;

	pb_flush(pb, copy);
	return copy;
}

// -----------------------------------------------------------------------------
static typeof(access_process_vm) *u_access_process_vm;

static struct task_struct *
ugdb_prepare_examine(struct ugdb *ugdb, struct utrace_examiner *exam);

static int
ugdb_finish_examine(struct ugdb *ugdb, struct utrace_examiner *exam);

static inline int ugdb_readmeam(struct ugdb *ugdb,
				unsigned long addr, int size, void *kbuf)
{
	struct utrace_examiner exam;
	struct task_struct *task;
	int ret = -ESRCH;

	task = ugdb_prepare_examine(ugdb, &exam);
	if (!task)
		return ret;

	ret = u_access_process_vm(task, addr, kbuf, size, 0);

	if (ugdb_finish_examine(ugdb, &exam))
		ret = -ESRCH;

	return ret;
}

#define WP_CHANGED		0

struct wp {
	unsigned long		flags;
	struct list_head	wp_node;

	unsigned long		addr;
	int			size;
	unsigned long		data[0];
};

static void p_wp_free(struct p_wp *p_wp)
{
	struct wp *wp, *n;

	list_for_each_entry_safe(wp, n, &p_wp->wp_list, wp_node) {
		p_wp->wp_cnt -= 1;
		p_wp->wp_mem -= wp->size;
		kfree(wp);
	}

	WARN_ON(p_wp->wp_cnt || p_wp->wp_mem);
}

static inline struct wp *wp_alloc(struct ugdb *ugdb, unsigned long addr, int size)
{
	struct wp *wp;

	wp = kmalloc(sizeof(*wp) + size, GFP_KERNEL);
	if (!wp)
		return wp;

	wp->flags = 0;
	wp->addr = addr;
	wp->size = size;

	// XXX: and what should we do if it fails?
	ugdb_readmeam(ugdb, addr, size, wp->data);

	return wp;
}

static inline struct wp *wp_find(struct p_wp *p_wp, unsigned long addr, int size)
{
	struct wp *wp;

	list_for_each_entry(wp, &p_wp->wp_list, wp_node)
		if (wp->addr == addr && wp->size == size)
			return wp;

	return NULL;
}

#define WP_MAX_CNT	64
#define WP_MAX_MEM	4096
#define WP_MAX_LEN	1024

// -----------------------------------------------------------------------------
#include <asm/debugreg.h>

#define X86_BREAKPOINT_LEN_1		0x40
#define X86_BREAKPOINT_LEN_2		0x44
#define X86_BREAKPOINT_LEN_4		0x4c
#define X86_BREAKPOINT_LEN_8		0x48

#define X86_BREAKPOINT_WRITE	0x81

#define HBP_NUM 4

static inline unsigned long
__encode_dr7(int drnum, unsigned int len, unsigned int type)
{
	unsigned long bp_info;

	bp_info = (len | type) & 0xf;
	bp_info <<= (DR_CONTROL_SHIFT + drnum * DR_CONTROL_SIZE);
	bp_info |= (DR_GLOBAL_ENABLE << (drnum * DR_ENABLE_SIZE));

	return bp_info;
}

static inline bool x86_hw_wp_active(struct x86_hw_wp *hw_wp)
{
	return hw_wp->nr;
}

static int x86_build_hw_wp(struct p_wp *p_wp, struct x86_hw_wp *hw_wp)
{
	struct wp *wp;
	unsigned long dr7 = 0;
	unsigned int nr = 0;
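	/*
	 * Translate the per-process list of write watchpoints into an x86
	 * debug register image: one DR0..DR3 slot per watchpoint, with DR7
	 * enabling each slot for the write condition and the given length.
	 * Fails if there are too many watchpoints or a size/alignment the
	 * hardware can't handle; the caller then falls back to software
	 * watchpoints.
	 */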

	hw_wp->nr = 0;
	hw_wp->dr7 = 0;

	list_for_each_entry(wp, &p_wp->wp_list, wp_node) {
		u8 hw_len;

		if (nr >= HBP_NUM)
			return -EINVAL;

		switch (wp->size) {
			default:
				return -EINVAL;

			case 1:
				hw_len = X86_BREAKPOINT_LEN_1;
				break;

			case 2:
				hw_len = X86_BREAKPOINT_LEN_2;
				break;

			case 4:
				hw_len = X86_BREAKPOINT_LEN_4;
				break;

			case 8:
				hw_len = X86_BREAKPOINT_LEN_8;
				break;
		}

		if (wp->addr & (wp->size - 1))
			return -EINVAL;

		// XXX: TIF_IA32? IA32_PAGE_OFFSET?
		if (wp->addr >= TASK_SIZE - 8)
			return -EINVAL;

		hw_wp->dr[nr] = wp->addr;

		dr7 |= __encode_dr7(nr, hw_len, X86_BREAKPOINT_WRITE);

		nr++;
	}

	hw_wp->nr = nr;
	hw_wp->dr7 = dr7;

	return 0;
}

static void x86_update_hw_wp(struct ugdb *ugdb, struct ugdb_process *process)
{
	bool old_active, new_active;
	struct p_wp *p_wp = &process->p_wp;
	struct x86_hw_wp hw_wp;

	x86_build_hw_wp(p_wp, &hw_wp);

	old_active = x86_hw_wp_active(&p_wp->hw_wp);
	new_active = x86_hw_wp_active(&hw_wp);

	if (!old_active && !new_active)
		return;

	mutex_lock(&ugdb->u_mutex);
	p_wp->hw_wp = hw_wp;

	if (old_active) {
		struct ugdb_thread *thread;

		list_for_each_entry(thread, &process->p_threads, t_threads) {
			if (thread->t_wp.hw_wp_state == HW_WP_ACTIVE)
				thread->t_wp.hw_wp_state = HW_WP_INVALID;
		}
	}
	mutex_unlock(&ugdb->u_mutex);
}

#include <linux/ptrace.h>
static typeof(arch_ptrace) *u_arch_ptrace;

static int x86_set_dr(struct task_struct *task, int drnum, unsigned long val)
{
	int ret = u_arch_ptrace(task, PTRACE_POKEUSR,
				offsetof(struct user, u_debugreg[drnum]),
				val);

	if (ret)
		printk(KERN_INFO "ugdb: set_dr(%d, %d, %lx) failed: %d\n",
					task->pid, drnum, val, ret);

	return ret;
}

static void x86_apply_hw_bp(struct ugdb_thread *thread)
{
	struct x86_hw_wp *hw_wp = &thread->t_process->p_wp.hw_wp;
	struct task_struct *task = thread_to_task(thread);
	int state = HW_WP_INVALID;
	int nr;
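	/*
	 * Write the per-process debug register image into this thread via
	 * arch_ptrace(PTRACE_POKEUSR, u_debugreg[]): DR0..DR3 first, then
	 * DR7 to enable them. On failure the thread stays HW_WP_INVALID
	 * and software watchpoints are used instead.
	 */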

	if (!task)
		goto err;

	for (nr = 0; nr < hw_wp->nr; ++nr) {
		if (x86_set_dr(task, nr, hw_wp->dr[nr]))
			goto err;
	}

	if (x86_set_dr(task, 7, hw_wp->dr7))
		goto err;

	state = x86_hw_wp_active(hw_wp) ? HW_WP_ACTIVE : HW_WP_NONE;
err:
	thread->t_wp.hw_wp_state = state;
}

static inline bool need_sync_hw_wp(struct ugdb_thread *thread)
{
	int state = thread->t_wp.hw_wp_state;
	bool active;

	if (state == HW_WP_INVALID)
		return true;

	active = x86_hw_wp_active(&thread->t_process->p_wp.hw_wp);
	if (active != (state == HW_WP_ACTIVE))
		return true;

	return false;
}

static void thread_sync_hw_wp(struct ugdb_thread *thread)
{
	if (!need_sync_hw_wp(thread))
		return;

	x86_apply_hw_bp(thread);
}

static bool has_watchpoints(struct ugdb_thread *thread)
{
	return unlikely(!list_empty(&thread->t_process->p_wp.wp_list));
}

static inline bool has_soft_watchpoints(struct ugdb_thread *thread)
{
	return has_watchpoints(thread) &&
		thread->t_wp.hw_wp_state != HW_WP_ACTIVE;
}

// -----------------------------------------------------------------------------
static const char *handle_z(struct ugdb *ugdb, char *cmd, int len)
{
	unsigned long addr, size;
	bool insert;
	int type;

	struct ugdb_process *process;
	struct p_wp *p_wp;
	struct wp *wp;
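	/*
	 * "Z2,addr,len" inserts and "z2,addr,len" removes a write
	 * watchpoint for the current Hg process; any other Z/z type gets
	 * the empty reply (not supported).
	 */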

	insert = (*cmd++ == 'Z');
	if (sscanf(cmd, "%d,%lx,%lx", &type, &addr, &size) != 3)
		return "E01";

	if (type != 2)
		return "";

	if (size > WP_MAX_LEN)
		return "E01";

	process = NULL;
	mutex_lock(&ugdb->u_mutex);
	if (ugdb->u_cur_hg)
		process = ugdb->u_cur_hg->t_process;
	mutex_unlock(&ugdb->u_mutex);

	if (!process)
		return "E01";

	p_wp = &process->p_wp;
	wp = wp_find(p_wp, addr, size);

	if (insert) {
		if (wp)
			return "E01";

		if (!size || size > WP_MAX_LEN ||
		    p_wp->wp_cnt + 1 > WP_MAX_CNT ||
		    p_wp->wp_mem + size > WP_MAX_MEM)
			return "E01";

		wp = wp_alloc(ugdb, addr, size);
		if (!wp)
			return "E01";

		down_write(&p_wp->wp_sem);
		list_add_tail(&wp->wp_node, &p_wp->wp_list);
		up_write(&p_wp->wp_sem);

		p_wp->wp_cnt += 1;
		p_wp->wp_mem += size;
	} else {
		if (!wp)
			return "E01";

		down_write(&p_wp->wp_sem);
		list_del(&wp->wp_node);
		up_write(&p_wp->wp_sem);

		if (test_bit(WP_CHANGED, &wp->flags)) {
			struct ugdb_thread *thread;

			mutex_lock(&ugdb->u_mutex);
			list_for_each_entry(thread, &process->p_threads,
								t_threads) {
				if (thread->t_wp.wp_cur == wp)
					thread->t_wp.wp_cur = NULL;
			}
			mutex_unlock(&ugdb->u_mutex);
		}

		p_wp->wp_cnt -= 1;
		p_wp->wp_mem -= size;

		kfree(wp);
	}

	x86_update_hw_wp(ugdb, process);

	return "OK";
}

#define WP_NO_WATCH	-1ul

static unsigned long get_cur_watch(struct ugdb_thread *thread)
{
	struct wp *wp = thread->t_wp.wp_cur;

	if (likely(!wp))
		return WP_NO_WATCH;

	thread->t_wp.wp_cur = NULL;
	clear_bit(WP_CHANGED, &wp->flags);

	return wp->addr;
}

static int memcmp_user(void *kp, void __user *up, int sz)
{
	unsigned char data[64];

	while (sz) {
		int size = sizeof(data);
		if (size > sz)
			size = sz;

		if (copy_from_user(data, up, size))
			;

		if (memcmp(data, kp, size))
			return 1;

		up += size;
		kp += size;
		sz -= size;
	}

	return 0;
}

static bool check_one_wp(struct wp *wp)
{
	void __user *up = (void __user*)wp->addr;
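	/*
	 * Software watchpoint check: compare the tracee's memory with the
	 * snapshot taken when the watchpoint was inserted. On a change,
	 * refresh the snapshot and set WP_CHANGED so the hit is reported
	 * only once.
	 */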

	if (test_bit(WP_CHANGED, &wp->flags))
		return false;

	if (!memcmp_user(wp->data, up, wp->size))
		return false;

	if (test_and_set_bit(WP_CHANGED, &wp->flags))
		return false;

	copy_from_user(wp->data, up, wp->size);

	return true;
}

static bool check_watchpoints(struct ugdb_thread *thread)
{
	struct p_wp *p_wp = &thread->t_process->p_wp;
	struct wp *wp;
	bool ret = false;

	down_read(&p_wp->wp_sem);
	list_for_each_entry(wp, &p_wp->wp_list, wp_node) {
		ret = check_one_wp(wp);
		if (ret) {
			thread->t_wp.wp_cur = wp;
			break;
		}
	}
	up_read(&p_wp->wp_sem);

	return ret;
}

static bool wp_next_soft_trap(struct ugdb_thread *thread, siginfo_t *info)
{
	if (!has_watchpoints(thread))
		return false;

	if (check_watchpoints(thread))
		return false;

	return	info->si_signo == SIGTRAP &&
		thread->t_step == T_STEP_SOFT &&
		// XXX: this filters out do_int3() only.
		// XXX: this is not right, we probably need
		// to check trap_no == 1, but then we need
		// unexported user_single_step_siginfo() for
		// ugdb_report_signal().
		current->thread.trap_no != 3;
}

static void ugdb_wp_clone(struct ugdb_thread *thread)
{
	if (!has_watchpoints(thread))
		return;

	thread_sync_hw_wp(thread);

	if (has_soft_watchpoints(thread)) {
		thread->t_step = T_STEP_SOFT;
		ugdb_set_events(thread, UTRACE_EVENT(SYSCALL_EXIT));
		ugdb_control(thread, UTRACE_SINGLESTEP);
	}
}

// -----------------------------------------------------------------------------
// XXX: include/gdb/signals.h:target_signal
// incomplete: 7, 29, rt?
static int to_gdb_sigmap[] = {
	[SIGHUP]	= 1,
	[SIGINT]	= 2,
	[SIGQUIT]	= 3,
	[SIGILL]	= 4,
	[SIGTRAP]	= 5,
	[SIGABRT]	= 6,
	[SIGIOT]	= 0,	/* ??? */
	[SIGBUS]	= 10,
	[SIGFPE]	= 8,
	[SIGKILL]	= 9,
	[SIGUSR1]	= 30,
	[SIGSEGV]	= 11,
	[SIGUSR2]	= 31,
	[SIGPIPE]	= 13,
	[SIGALRM]	= 14,
	[SIGTERM]	= 15,
	[SIGSTKFLT]	= 0,	/* ??? */
	[SIGCHLD]	= 20,
	[SIGCONT]	= 19,
	[SIGSTOP]	= 17,
	[SIGTSTP]	= 18,
	[SIGTTIN]	= 21,
	[SIGTTOU]	= 22,
	[SIGURG]	= 16,
	[SIGXCPU]	= 24,
	[SIGXFSZ]	= 25,
	[SIGVTALRM]	= 26,
	[SIGPROF]	= 27,
	[SIGWINCH]	= 28,
	[SIGIO]		= 23,
	[SIGPWR]	= 32,
	[SIGSYS]	= 12,
};

static int sig_to_gdb(unsigned sig)
{
	if (sig < ARRAY_SIZE(to_gdb_sigmap) && to_gdb_sigmap[sig])
		return to_gdb_sigmap[sig];
	return sig;
}

static int sig_from_gdb(unsigned sig)
{
	int i;

	// XXX: valid_signal() is wrong, gdb has its own idea
	// about signals. fix to_gdb_sigmap[].
	if (!sig || !valid_signal(sig))
		return 0;

	for (i = 0; i < ARRAY_SIZE(to_gdb_sigmap); i++) {
		if (to_gdb_sigmap[i] == sig)
			return i;
	}

	return sig;
}

static int ugdb_report_stopped(struct ugdb *ugdb, bool async)
{
	struct ugdb_thread *thread;
	int pid, tid, event, data;
	unsigned long watch;
	struct pbuf *pb;
	char ex_r;
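	/*
	 * Build the stop reply for the next stopped thread: "W"/"X" plus
	 * ";process:PID" for a process exit, or "TNN" with "thread:pPID.TID;"
	 * (and "watch:ADDR;" after a watchpoint hit). With async it is sent
	 * as a "%Stop:" notification, otherwise as a normal reply.
	 */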

	mutex_lock(&ugdb->u_mutex);
	thread = ugdb_next_stopped(ugdb);
	if (!thread)
		goto unlock;

	event = thread->t_stop_event;
	watch = get_cur_watch(thread);

	WARN_ON(thread_alive(thread) != (T_EV_TYPE(event) != T_EV_EXIT));

	pid = thread->t_process->p_pid;
	tid = thread->t_tid;
unlock:
	mutex_unlock(&ugdb->u_mutex);

	if (!thread)
		return false;

	pb = &ugdb->u_pbuf;

	// XXX: damn, cleanup me...
	if (async) {
		__pb_start(pb, '%');
		pb_puts(pb, "Stop:");
	} else {
		pb_start(pb);
	}

	data = T_EV_DATA(event);
	switch (T_EV_TYPE(event)) {
	case T_EV_EXIT:
		if (data & 0xff) {
			data = sig_to_gdb(data & 0xff);
			ex_r = 'X';
		} else {
			data >>= 8;
			ex_r = 'W';
		}

		pb_printf(pb, "%c%x;process:%x", ex_r, data, pid);
		ugdb_destroy_process(thread->t_process);
		break;

	case T_EV_SIGN:
	case T_EV_NONE:
		pb_printf(pb, "T%02xthread:p%x.%x;",
					sig_to_gdb(data), pid, tid);

		if (unlikely(watch != WP_NO_WATCH))
			pb_printf(pb, "watch:%lx;", watch);

		break;

	default:
		printk(KERN_INFO "ugdb: bad stop event %x\n", event);
	}

	pb_end(pb);

	return true;
}

static const char *handle_vstopped(struct ugdb *ugdb)
{
	if (ugdb->u_stop_state != U_STOP_SENT)
		return "E01";

	if (ugdb_report_stopped(ugdb, false))
		return NULL;

	return "OK";
}

static const char *handle_thread_info(struct ugdb *ugdb, bool start)
{
	struct ugdb_thread *thread;
	int pid = 0, tid;

	mutex_lock(&ugdb->u_mutex);
	if (start)
		ugdb_reset_tinfo(ugdb);
	else if (!ugdb->u_cur_tinfo)
		printk(KERN_INFO "ugdb: unexpected qsThreadInfo\n");

	thread = ugdb_advance_tinfo(ugdb);
	if (thread) {
		pid = thread->t_process->p_pid;
		tid = thread->t_tid;
	}
	mutex_unlock(&ugdb->u_mutex);

	if (!pid)
		return start ? "E01" : "l";

	pb_packf(&ugdb->u_pbuf, "mp%x.%x", pid, tid);
	return NULL;
}

static char *parse_xid(char *str, int *ppid, bool multi)
{
	if (*str == '-') {
		str++;

		if (multi && *str++ == '1')
			*ppid = -1;
		else
			str = NULL;
	} else {
		char *cur = str;

		*ppid = simple_strtoul(cur, &str, 16);
		if (str == cur)
			str = NULL;
	}

	return str;
}

static char *parse_pid_tid(char *str, int *ppid, int *ptid, bool multi)
{
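	/*
	 * Remote protocol thread-id syntax: "p<pid>.<tid>" in hex, where
	 * "-1" means "all" when multi is allowed.
	 */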
	if (*str++ != 'p')
		return NULL;

	str = parse_xid(str, ppid, multi);
	if (!str)
		return NULL;

	if (*str++ != '.')
		return NULL;

	str = parse_xid(str, ptid, multi);
	if (!str)
		return NULL;

	return str;
}

static const char *handle_set_cur(struct ugdb *ugdb, char *cmd)
{

	struct ugdb_thread **pthread;
	int pid, tid;

	switch (*cmd++) {
	case 'g':
		pthread = &ugdb->u_cur_hg;
		break;

	case 'c':
		pthread = &ugdb->u_cur_hc;
		break;

	default:
		goto err;
	}

	if (!parse_pid_tid(cmd, &pid, &tid, false))
		goto err;

	mutex_lock(&ugdb->u_mutex);
	*pthread = ugdb_find_thread(ugdb, pid, tid);
	mutex_unlock(&ugdb->u_mutex);

	if (*pthread)
		return "OK";

err:
	return "E01";
}

static const char *handle_ck_alive(struct ugdb *ugdb, char *cmd)
{
	struct ugdb_thread *thread;
	int pid = 0, tid;

	if (!parse_pid_tid(cmd, &pid, &tid, false))
		goto err;

	mutex_lock(&ugdb->u_mutex);
	thread = ugdb_find_thread(ugdb, pid, tid);
	mutex_unlock(&ugdb->u_mutex);

	if (thread)
		return "OK";

err:
	return "E01";
}

static int parse_pid(char *str)
{
	int pid;

	if (!parse_xid(str, &pid, false))
		return 0;

	return pid;
}

static const char *handle_vattach(struct ugdb *ugdb, char *cmd)
{
	int pid = parse_pid(cmd);

	if (pid && !ugdb_attach(ugdb, pid))
		return "OK";

	return "E01";
}

static const char *handle_detach(struct ugdb *ugdb, char *cmd)
{
	int pid;

	if (*cmd++ != ';')
		goto err;

	pid = parse_pid(cmd);
	if (pid && !ugdb_detach(ugdb, pid))
		return "OK";

err:
	return "E01";
}

typedef int (*each_func_t)(struct ugdb_thread *, void *);

static int ugdb_do_each_thread(struct ugdb *ugdb, int pid, int tid,
				each_func_t func, void *arg)
{
	struct ugdb_process *process;
	struct ugdb_thread *thread;
	int ret = -ESRCH;

	list_for_each_entry(process, &ugdb->u_processes, p_processes) {
		if (unlikely(!process_alive(process)))
			continue;
		if (pid > 0 && process->p_pid != pid)
			continue;

		list_for_each_entry(thread, &process->p_threads, t_threads) {
			if (WARN_ON(!thread_alive(thread)))
				continue;
			if (tid > 0 && thread->t_tid != tid)
				continue;

			ret = func(thread, arg);
			if (ret)
				goto out;

			if (tid >= 0)
				break;
		}

		if (pid >= 0)
			break;
	}

out:
	return ret;
}

static int thread_cont_signal(struct ugdb_thread *thread, int signr)
{
	/*
	 * T_STOP_STOPPED was set under ->u_slock so we can't race
	 * with ugdb_add_stopped() and get the wrong t_stop_event.
	 * And, the tracee never changes it after T_STOP_STOPPED.
	 */

	switch (T_EV_TYPE(thread->t_stop_event)) {
	case T_EV_SIGN:
		WARN_ON(!T_EV_DATA(thread->t_stop_event));
		thread->t_stop_event = T_EV_SIGN | signr;
		break;

	default:
		if (!signr)
			break;

		// XXX: temporary hack, will be reported.
		// but perhaps this is what we want ???
		kill_pid(thread->t_spid, signr, 0);
		break;
	}

	return 0;
}

static int ugdb_resume_thread(struct ugdb_thread *thread, int signr, bool step,
				bool all)
{
	int t_step;
	int ret = 0;

	/*
	 * Otherwise I do not know what to do if sig/step, and anyway
	 * I don't think gdb can try to cont a thread which was not
	 * reported as stopped.
	 */
	if (!(thread->t_stop_state & T_STOP_STOPPED))
		return ret;

	ret = thread_cont_signal(thread, signr);
	if (ret < 0)
		return ret;

	thread_sync_hw_wp(thread);

	t_step = 0;
	if (step)
		t_step = T_STEP_HARD;
	else if (has_soft_watchpoints(thread))
		t_step = T_STEP_SOFT;

	return ugdb_cont_thread(thread, all, t_step);
}

static const char *handle_c(struct ugdb *ugdb, char *cmd)
{
	struct ugdb_thread *thread;
	const char *rc = "E01";
	int gdbsig, signr = 0;
	bool step;

	step = (*cmd == 'S' || *cmd == 's');

	switch (*cmd++) {
	case 'C':
	case 'S':
		gdbsig = simple_strtoul(cmd, &cmd, 16);
		if (!gdbsig)
			return rc;

		signr = sig_from_gdb(gdbsig);
		if (!signr)
			printk(KERN_INFO "ugdb: sorry, can't map signal %d\n",
					gdbsig);

		if (*cmd == ';')
			++cmd;
		/* fall */

	case 'c':
	case 's':
		if (!*cmd)
			break;

		printk(KERN_INFO "ugdb: $c ADDR not implemented\n");
		return rc;
	}

	mutex_lock(&ugdb->u_mutex);
	thread = ugdb->u_cur_hc;
	if (!thread)
		goto unlock;

	if (ugdb_resume_thread(thread, signr, step, false) <= 0)
		goto unlock;

	/* Surprise: non-stop should not reply! */
	rc = NULL;
unlock:
	mutex_unlock(&ugdb->u_mutex);

	return rc;
}

struct vcont_arg {
	int	pid, tid;
	bool	intr;
	int	signr;
	bool	step;
};

static char* parse_vcont_arg(char *cmd, struct vcont_arg *vcont)
{
	vcont->intr = false;
	vcont->signr = 0;

	switch (*cmd++) {
	case 't':
		vcont->intr = true;
		break;

	case 'S':
		vcont->signr = 1;
	case 's':
		vcont->step = true;
		break;

	case 'C':
		vcont->signr = 1;
	case 'c':
		vcont->step = false;
		break;

	default:
		goto err;
	}

	if (vcont->signr) {
		int gdbsig = simple_strtoul(cmd, &cmd, 16);
		if (!gdbsig)
			goto err;

		vcont->signr = sig_from_gdb(gdbsig);
		if (!vcont->signr)
			printk(KERN_INFO "ugdb: sorry, can't map signal %d\n",
				 	gdbsig);
	}

	vcont->pid = -1;
	vcont->tid = -1;
	if (*cmd == ':') {
		cmd = parse_pid_tid(cmd + 1, &vcont->pid, &vcont->tid, true);
		if (!cmd)
			goto err;
	}

	return cmd;
err:
	return NULL;
}

static int do_vcont_thread(struct ugdb_thread *thread, void *arg)
{
	struct vcont_arg *vcont = arg;
	unsigned long genctr;
	bool all;
	int err;

	genctr = thread->t_ugdb->u_genctr;
	if (genctr == thread->t_genctr)
		return 0;

	WARN_ON_ONCE(u_after(thread->t_genctr, genctr));

	thread->t_genctr = genctr;
	all = (vcont->tid < 0);

	if (vcont->intr)
		err = ugdb_stop_thread(thread, all);
	else
		err = ugdb_resume_thread(thread, vcont->signr, vcont->step,
					all);

	if (err < 0)
		return err;
	return 0;
}

static const char *handle_vcont(struct ugdb *ugdb, char *cmd)
{
	const char *rc = "E01";
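	/*
	 * vCont;action[:thread-id];... applies each action to the matching
	 * threads. u_genctr ensures a thread gets only the first action
	 * that matches it, i.e. the leftmost action wins as the protocol
	 * requires.
	 */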

	switch (*cmd) {
	case ';':
		break;
	case '?':
		return "vCont;t;c;C;s;S";
	default:
		return "E01";
	}

	mutex_lock(&ugdb->u_mutex);
	ugdb->u_genctr++;

	while (*cmd) {
		struct vcont_arg vcont_arg;
		int err;

		if (*cmd++ != ';')
			goto unlock;

		cmd = parse_vcont_arg(cmd, &vcont_arg);
		if (!cmd)
			goto unlock;

		err = ugdb_do_each_thread(ugdb,
				vcont_arg.pid, vcont_arg.tid,
				do_vcont_thread, &vcont_arg);
		if (err)
			goto unlock;
	}

	rc = "OK";
unlock:
	mutex_unlock(&ugdb->u_mutex);

	return rc;
}

static const char *handle_qpass_signals(struct ugdb *ugdb, char *cmd)
{
	sigset_t *set = &ugdb->u_sig_ign;

	sigemptyset(set);
	while (*cmd) {
		char *end;
		int sig = simple_strtoul(cmd, &end, 16);

		if (cmd == end || *end != ';')
			return "E01";
		cmd = end + 1;

		sig = sig_from_gdb(sig);
		if (!sig)
			// XXX: to_gdb_sigmap[] incomplete...
			// return "E01";
			continue;

		sigaddset(set, sig);
	}

	return "OK";
}

// -----------------------------------------------------------------------------
static struct task_struct *
ugdb_prepare_examine(struct ugdb *ugdb, struct utrace_examiner *exam)
{
	struct ugdb_thread *thread;
	struct task_struct *task;
	int err;

	mutex_lock(&ugdb->u_mutex);
	thread = ugdb->u_cur_hg;
	if (!thread || !(thread->t_stop_state & T_STOP_STOPPED))
		goto err;

	// XXX: u_cur_hg can't exit, we hold the mutex
	task = thread_to_task(thread);
	if (!task)
		goto err;

	for (;;) {
		if (fatal_signal_pending(current))
			goto err;

		err = utrace_prepare_examine(task, thread->t_engine, exam);
		if (!err)
			break;

		if (err == -ESRCH)
			goto err;

		schedule_timeout_interruptible(1);
	}

	return task;
err:
	mutex_unlock(&ugdb->u_mutex);
	return NULL;
}

// XXX: we hold the mutex in between, but only because we can't
// use get_task_struct/put_task_struct.

static int
ugdb_finish_examine(struct ugdb *ugdb, struct utrace_examiner *exam)
{
	// XXX: u_cur_hg can't exit, we hold the mutex
	struct ugdb_thread *thread = ugdb->u_cur_hg;
	struct task_struct *task = thread_to_task(thread);
	int ret = -ESRCH;

	if (task)
		ret = utrace_finish_examine(task, thread->t_engine, exam);

	mutex_unlock(&ugdb->u_mutex);
	return ret;
}

#define REGSET_GENERAL	0
#define REGSET_FP	1

#define MAX_REG_SIZE	16

struct gdbreg {
	unsigned short
			gdb_size, gdb_offs,
			rs_setno, usr_offs;
};

//  gdb/gdbserver/i387-fp.c:i387_fxsave
struct gdb_i387_fxsave {
  unsigned short fctrl;
  unsigned short fstat;
  unsigned short ftag;
  unsigned short fop;
  unsigned int fioff;
  unsigned short fiseg;
  unsigned short pad1;
  unsigned int fooff;
  unsigned short foseg;
  unsigned short pad12;

  unsigned int mxcsr;
  unsigned int pad3;

  unsigned char st_space[128];
  unsigned char xmm_space[256];
};

#define FPU__(mem)						\
	.rs_setno = REGSET_FP,					\
	.usr_offs = offsetof(struct gdb_i387_fxsave, mem)

#define GEN__(mem)						\
	.rs_setno = REGSET_GENERAL,				\
	.usr_offs = offsetof(struct user_regs_struct, mem)

/* generated from gdb/regformats/reg-x86-64-linux.dat */
struct gdbreg gdb_regmap_64[] = {
	[ 0] = { .gdb_size =  8, .gdb_offs =   0, GEN__(ax) },
	[ 1] = { .gdb_size =  8, .gdb_offs =   8, GEN__(bx) },
	[ 2] = { .gdb_size =  8, .gdb_offs =  16, GEN__(cx) },
	[ 3] = { .gdb_size =  8, .gdb_offs =  24, GEN__(dx) },
	[ 4] = { .gdb_size =  8, .gdb_offs =  32, GEN__(si) },
	[ 5] = { .gdb_size =  8, .gdb_offs =  40, GEN__(di) },
	[ 6] = { .gdb_size =  8, .gdb_offs =  48, GEN__(bp) },
	[ 7] = { .gdb_size =  8, .gdb_offs =  56, GEN__(sp) },
	[ 8] = { .gdb_size =  8, .gdb_offs =  64, GEN__(r8) },
	[ 9] = { .gdb_size =  8, .gdb_offs =  72, GEN__(r9) },
	[10] = { .gdb_size =  8, .gdb_offs =  80, GEN__(r10) },
	[11] = { .gdb_size =  8, .gdb_offs =  88, GEN__(r11) },
	[12] = { .gdb_size =  8, .gdb_offs =  96, GEN__(r12) },
	[13] = { .gdb_size =  8, .gdb_offs = 104, GEN__(r13) },
	[14] = { .gdb_size =  8, .gdb_offs = 112, GEN__(r14) },
	[15] = { .gdb_size =  8, .gdb_offs = 120, GEN__(r15) },
	[16] = { .gdb_size =  8, .gdb_offs = 128, GEN__(ip) },
	[17] = { .gdb_size =  4, .gdb_offs = 136, GEN__(flags) },
	[18] = { .gdb_size =  4, .gdb_offs = 140, GEN__(cs) },
	[19] = { .gdb_size =  4, .gdb_offs = 144, GEN__(ss) },
	[20] = { .gdb_size =  4, .gdb_offs = 148, GEN__(ds) },
	[21] = { .gdb_size =  4, .gdb_offs = 152, GEN__(es) },
	[22] = { .gdb_size =  4, .gdb_offs = 156, GEN__(fs) },
	[23] = { .gdb_size =  4, .gdb_offs = 160, GEN__(gs) },
	[24] = { .gdb_size = 10, .gdb_offs = 164, FPU__(st_space[0]) },
	[25] = { .gdb_size = 10, .gdb_offs = 174, FPU__(st_space[16]) },
	[26] = { .gdb_size = 10, .gdb_offs = 184, FPU__(st_space[32]) },
	[27] = { .gdb_size = 10, .gdb_offs = 194, FPU__(st_space[48]) },
	[28] = { .gdb_size = 10, .gdb_offs = 204, FPU__(st_space[64]) },
	[29] = { .gdb_size = 10, .gdb_offs = 214, FPU__(st_space[80]) },
	[30] = { .gdb_size = 10, .gdb_offs = 224, FPU__(st_space[96]) },
	[31] = { .gdb_size = 10, .gdb_offs = 234, FPU__(st_space[112]) },
	[32] = { .gdb_size =  4, .gdb_offs = 244, FPU__(fctrl) },
	[33] = { .gdb_size =  4, .gdb_offs = 248, FPU__(fstat) },
	[34] = { .gdb_size =  4, .gdb_offs = 252, FPU__(ftag) },
	[35] = { .gdb_size =  4, .gdb_offs = 256, FPU__(fiseg) },
	[36] = { .gdb_size =  4, .gdb_offs = 260, FPU__(fioff) },
	[37] = { .gdb_size =  4, .gdb_offs = 264, FPU__(foseg) },
	[38] = { .gdb_size =  4, .gdb_offs = 268, FPU__(fooff) },
	[39] = { .gdb_size =  4, .gdb_offs = 272, FPU__(fop) },
	[40] = { .gdb_size = 16, .gdb_offs = 276, FPU__(xmm_space[0]) },
	[41] = { .gdb_size = 16, .gdb_offs = 292, FPU__(xmm_space[16]) },
	[42] = { .gdb_size = 16, .gdb_offs = 308, FPU__(xmm_space[32]) },
	[43] = { .gdb_size = 16, .gdb_offs = 324, FPU__(xmm_space[48]) },
	[44] = { .gdb_size = 16, .gdb_offs = 340, FPU__(xmm_space[64]) },
	[45] = { .gdb_size = 16, .gdb_offs = 356, FPU__(xmm_space[80]) },
	[46] = { .gdb_size = 16, .gdb_offs = 372, FPU__(xmm_space[96]) },
	[47] = { .gdb_size = 16, .gdb_offs = 388, FPU__(xmm_space[112]) },
	[48] = { .gdb_size = 16, .gdb_offs = 404, FPU__(xmm_space[128]) },
	[49] = { .gdb_size = 16, .gdb_offs = 420, FPU__(xmm_space[144]) },
	[50] = { .gdb_size = 16, .gdb_offs = 436, FPU__(xmm_space[160]) },
	[51] = { .gdb_size = 16, .gdb_offs = 452, FPU__(xmm_space[176]) },
	[52] = { .gdb_size = 16, .gdb_offs = 468, FPU__(xmm_space[192]) },
	[53] = { .gdb_size = 16, .gdb_offs = 484, FPU__(xmm_space[208]) },
	[54] = { .gdb_size = 16, .gdb_offs = 500, FPU__(xmm_space[224]) },
	[55] = { .gdb_size = 16, .gdb_offs = 516, FPU__(xmm_space[240]) },
	[56] = { .gdb_size =  4, .gdb_offs = 532, FPU__(mxcsr) },
	[57] = { .gdb_size =  8, .gdb_offs = 536, GEN__(orig_ax) },
};

#undef FPU__
#undef GEN__

static inline int rw_one_reg(struct task_struct *task, int regnum,
				unsigned char regval[], bool write)
{
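	/*
	 * Read or write a single register identified by gdb's register
	 * number: map it to a (regset, offset) pair via gdb_regmap_64 and go
	 * through the regset ->get()/->set() hooks.  General registers go
	 * through the regset in full machine words, everything else uses its
	 * gdb size.  Returns the gdb-visible size on success.
	 */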
	const struct user_regset_view *view;
	const struct user_regset *rset;
	struct gdbreg *gdbreg;
	int err, count;

	view = task_user_regset_view(task);
	switch (view->e_machine) {
	case EM_X86_64:
		if (regnum >= ARRAY_SIZE(gdb_regmap_64))
			return -EINVAL;
		gdbreg = gdb_regmap_64;
		count = sizeof(long);
		break;

	default:
		return -EIO;
	}

	gdbreg += regnum;
	rset = view->regsets + gdbreg->rs_setno;

	if (gdbreg->rs_setno != REGSET_GENERAL)
		count = gdbreg->gdb_size;

	if (WARN_ON(count > MAX_REG_SIZE))
		return -EOVERFLOW;

	if (write)
		err = rset->set(task, rset, gdbreg->usr_offs,
				count, regval, NULL);
	else
		err = rset->get(task, rset, gdbreg->usr_offs,
				count, regval, NULL);

	return err ?: gdbreg->gdb_size;
}

static int ugdb_rw_one_reg(struct ugdb *ugdb, int regnum,
				unsigned char regval[], bool write)
{
	struct utrace_examiner exam;
	struct task_struct *task;
	int ret = -ESRCH;

	task = ugdb_prepare_examine(ugdb, &exam);
	if (!task)
		goto ret;

	ret = rw_one_reg(task, regnum, regval, write);

	if (ugdb_finish_examine(ugdb, &exam))
		ret = -ESRCH;
ret:
	return ret;
}

// XXX: hex_to_bin() after 90378889 commit
#include <linux/ctype.h>
static int cap_hex_to_bin(char ch)
{
	if ((ch >= '0') && (ch <= '9'))
		return ch - '0';
	ch = tolower(ch);
	if ((ch >= 'a') && (ch <= 'f'))
		return ch - 'a' + 10;
	return -1;
}

static int unhex(char *cmd, int size)
{
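	/* decode the hex payload in place: 2*size input chars -> size raw bytes */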
	char *bytes = cmd;

	while (size--) {
		int lo, hi;

		hi = cap_hex_to_bin(*cmd++);
		lo = cap_hex_to_bin(*cmd++);
		if (lo < 0 || hi < 0)
			return -EINVAL;

		*bytes++ = (hi << 4) | lo;
	}

	return 0;
}

static const char *handle_set_reg(struct ugdb *ugdb, char *cmd, int len)
{
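	/* "Pnn=XX...": write register nn (gdb numbering) from the hex payload */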
	unsigned int reg;
	int skip;

	if (sscanf(cmd, "P%x=%n", &reg, &skip) != 1)
		goto err;

	cmd += skip;
	len -= skip;

	if (len & 1)
		goto err;

	len /= 2;
	if (unhex(cmd, len))
		goto err;

	if (ugdb_rw_one_reg(ugdb, reg, cmd, true) != len)
		goto err;

	return "OK";
err:
	return "E01";
}

static const char *handle_get_reg(struct ugdb *ugdb, char *cmd, int len)
{
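	/* "pnn": read register nn and reply with its hex-encoded value */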
	unsigned char regval[MAX_REG_SIZE];
	unsigned int reg;
	int skip, size;

	if (sscanf(cmd, "p%x%n", &reg, &skip) != 1)
		goto err;
	if (len != skip)
		goto err;

	size = ugdb_rw_one_reg(ugdb, reg, regval, false);
	if (size < 0)
		goto err;

	pb_start(&ugdb->u_pbuf);
	pb_putbs(&ugdb->u_pbuf, regval, size);
	pb_end(&ugdb->u_pbuf);

	return NULL;
err:
	return "E01";
}

static int rw_one_regset(struct task_struct *task, int rsn,
				const struct user_regset_view *view,
				void *reg_mem, int reg_size,
				struct gdbreg *reg_map, int reg_cnt,
				void *gdb_mem, bool write)
{
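	/*
	 * Move a whole regset between the kernel's native layout (reg_mem)
	 * and gdb's flat register buffer (gdb_mem), copying only the entries
	 * of reg_map that belong to regset rsn.
	 */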
	const struct user_regset *rset = view->regsets + rsn;
	int err;

	if (write) {
		for (; reg_cnt--; ++reg_map) {
			if (reg_map->rs_setno != rsn)
				continue;

			memcpy(reg_mem + reg_map->usr_offs,
			       gdb_mem + reg_map->gdb_offs,
			       reg_map->gdb_size);
		}

		err = rset->set(task, rset, 0, reg_size, reg_mem, NULL);
	} else {
		err = rset->get(task, rset, 0, reg_size, reg_mem, NULL);
		if (err)
			goto ret;

		for (; reg_cnt--; ++reg_map) {
			if (reg_map->rs_setno != rsn)
				continue;

			memcpy(gdb_mem + reg_map->gdb_offs,
			       reg_mem + reg_map->usr_offs,
			       reg_map->gdb_size);
		}
	}
ret:
	return err;
}

#define GDB_REGS_SIZE(regmap)				\
	(regmap[ARRAY_SIZE(regmap)-1].gdb_size +	\
	 regmap[ARRAY_SIZE(regmap)-1].gdb_offs)
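/* the map is ordered by gdb_offs, so the last entry marks the end of the
   flat register block used by 'g'/'G' */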

struct regs_buf {
	unsigned char gdb_mem[1024];
	union {
		struct user_regs_struct	user_regs_struct;
		struct user_i387_struct	user_i387_struct;
	};
};

static int ugdb_rw_all_regs(struct task_struct *task,
				struct regs_buf *regs_buf,
				bool write)
{
	const struct user_regset_view *view;
	int ret;

#define RW(rsn, user, rmap)						\
	rw_one_regset(task, rsn, view,					\
			&regs_buf->user, sizeof(regs_buf->user),	\
			rmap, ARRAY_SIZE(rmap), regs_buf->gdb_mem,	\
			write)

	view = task_user_regset_view(task);
	switch (view->e_machine) {
	case EM_X86_64:
// XXX: even this needs fixup. There is no ->fs_base in gdb's set
// of regs, it is wrongly initialized as 0 and putreg()->do_arch_prctl()
// changes fs. Later, gdb seems to never use G if P works.
		ret = RW(REGSET_GENERAL, user_regs_struct, gdb_regmap_64);
		if (ret)
			break;

// XXX: needs fixup, see i387_fxsave_to_cache/i387_cache_to_fxsave.
// At least xfpregs_get() is safe even if result is not correct.
		if (!write) {
			ret = RW(REGSET_FP, user_i387_struct, gdb_regmap_64);
			if (ret)
				break;
		}

		ret = GDB_REGS_SIZE(gdb_regmap_64);
		break;

	default:
		ret = -EIO;
	}

#undef RW

	return ret;
}

static const char *handle_get_all_regs(struct ugdb *ugdb)
{
	struct regs_buf *regs_buf;
	struct utrace_examiner exam;
	struct task_struct *task;
	const char *ret = "E01";
	int size;

	regs_buf = kzalloc(sizeof(*regs_buf), GFP_KERNEL);
	if (!regs_buf)
		goto out;

	task = ugdb_prepare_examine(ugdb, &exam);
	if (!task)
		goto err;

	size = ugdb_rw_all_regs(task, regs_buf, false);

	if (ugdb_finish_examine(ugdb, &exam))
		goto err;

	if (size < 0)
		goto err;

	if (size > sizeof(regs_buf->gdb_mem)) {
		WARN_ON(1);
		goto err;
	}

	if (pb_room(&ugdb->u_pbuf) < 3 + size * 2) {
		WARN_ON(1);
		goto err;
	}

	pb_start(&ugdb->u_pbuf);
	pb_putbs(&ugdb->u_pbuf, regs_buf->gdb_mem, size);
	pb_end(&ugdb->u_pbuf);

	ret = NULL;
err:
	kfree(regs_buf);
out:
	return ret;
}

static const char *handle_set_all_regs(struct ugdb *ugdb, char *cmd, int len)
{
	struct regs_buf *regs_buf;
	struct utrace_examiner exam;
	struct task_struct *task;
	const char *ret = "E01";
	int size;

	printk(KERN_INFO "XXX: $G command, why?\n");

	cmd += 1;
	len -= 1;
	if (len & 1)
		goto out;

	len /= 2;
	if (unhex(cmd, len))
		goto out;

	regs_buf = kzalloc(sizeof(*regs_buf), GFP_KERNEL);
	if (!regs_buf)
		goto out;

	memcpy(regs_buf->gdb_mem, cmd, len);

	task = ugdb_prepare_examine(ugdb, &exam);
	if (!task)
		goto err;

	size = ugdb_rw_all_regs(task, regs_buf, true);

	if (ugdb_finish_examine(ugdb, &exam))
		goto err;

	if (size < 0)
		goto err;

	WARN_ON(size != len);
	ret = "OK";
err:
	kfree(regs_buf);
out:
	return ret;
}

static typeof(access_process_vm) *u_access_process_vm;

static const char *handle_readmem(struct ugdb *ugdb, char *cmd)
{
	struct utrace_examiner exam;
	struct task_struct *task;
	unsigned long addr, size;
	unsigned char *mbuf;
	int copied;

	if (sscanf(cmd, "m%lx,%lx", &addr, &size) != 2)
		goto err;

	size = min_t(unsigned long, size, pb_max_bs_size(&ugdb->u_pbuf));
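	/* gdb accepts a reply shorter than requested, so clamping the read to
	   what fits in one outgoing packet is fine */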
	if (!size)
		goto err;

	mbuf = pb_alloc_bs(&ugdb->u_pbuf, size);
	if (WARN_ON(!mbuf))
		goto err;

	task = ugdb_prepare_examine(ugdb, &exam);
	if (!task)
		goto err;

	copied = u_access_process_vm(task, addr, mbuf, size, 0);

	if (ugdb_finish_examine(ugdb, &exam))
		goto err;

	if (copied > 0) {
		pb_start(&ugdb->u_pbuf);
		pb_putbs(&ugdb->u_pbuf, mbuf, copied);
		pb_end(&ugdb->u_pbuf);

		return NULL;
	}
err:
	return "E01";
}

static const char *handle_writemem(struct ugdb *ugdb, char *cmd, int len)
{
	unsigned long addr, size;
	int skip, written;	/* sscanf's %n wants a plain int */
	struct utrace_examiner exam;
	struct task_struct *task;

	if (sscanf(cmd, "M%lx,%lx:%n", &addr, &size, &skip) != 2)
		goto err;

	cmd += skip;
	len -= skip;
	if (len != 2*size || !size)
		goto err;

	if (unhex(cmd, size))
		goto err;

	task = ugdb_prepare_examine(ugdb, &exam);
	if (!task)
		goto err;

	written = u_access_process_vm(task, addr, cmd, size, 1);

	if (ugdb_finish_examine(ugdb, &exam))
		goto err;

	if (written == size)
		return "OK";
err:
	return "E01";
}

static int ugdb_siginfo_rw(struct ugdb *ugdb, siginfo_t *info, bool write)
{
	struct task_struct *task;
	struct utrace_examiner exam;
	struct sighand_struct *sighand;
	siginfo_t *t_siginfo;
	int ret = -EINVAL;

	/*
	 * ugdb_prepare_examine() is overkill, but otherwise we can't
	 * assume task_is_traced(), and this is what ensures we can
	 * safely read/write ->t_siginfo which points to task's stack.
	 */
	task = ugdb_prepare_examine(ugdb, &exam);
	if (!task)
		goto out;

	/* OK, task_struct can't go away, but ->sighand can. */
	rcu_read_lock();
	sighand = rcu_dereference(task->sighand);
	if (!sighand)
		goto unlock_rcu;

	spin_lock_irq(&sighand->siglock);
	if (!task_is_traced(task))
		goto unlock_siglock;

	t_siginfo = ugdb->u_cur_hg->t_siginfo;
	if (!t_siginfo)
		goto unlock_siglock;

	if (write)
		*t_siginfo = *info;
	else
		*info = *t_siginfo;

	ret = 0;
unlock_siglock:
	spin_unlock_irq(&sighand->siglock);
unlock_rcu:
	rcu_read_unlock();

	ugdb_finish_examine(ugdb, &exam);
out:
	return ret;
}

static const char *handle_siginfo_read(struct ugdb *ugdb, char *cmd)
{
	unsigned int off, len;
	siginfo_t info;
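	/* "qXfer:siginfo:read::off,len" - return a slice of the siginfo of the
	   currently selected ('Hg') thread, which must be stopped */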

	if (sscanf(cmd, "%x,%x", &off, &len) != 2)
		goto err;

	if (off >= sizeof(info))
		goto err;

	if (len > sizeof(info) || off + len > sizeof(info))
		len = sizeof(info) - off;

	if (ugdb_siginfo_rw(ugdb, &info, false))
		goto err;

	/* off is a byte offset, don't do siginfo_t pointer arithmetic */
	if (pb_qfer(&ugdb->u_pbuf, (void *)&info + off, len,
				(off + len < sizeof(info))))
		goto err;

	// XXX: Oh. we also need x86_siginfo_fixup(). how ugly.

	return NULL;
err:
	return "E01";
}

// -----------------------------------------------------------------------------
#define EQ(cmd, str)					\
	(strncmp((cmd), (str), sizeof(str)-1) ?	false :	\
		((cmd) += sizeof(str)-1, true))
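/* EQ(cmd, "prefix"): true if cmd starts with "prefix"; on a match cmd is
   advanced past the prefix so the handlers parse only the tail. */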

static const char *handle_qfer(struct ugdb *ugdb, char *cmd)
{
	const char *rc = "E01";

	if (EQ(cmd, "siginfo:")) {
		if (EQ(cmd, "read::"))
			rc = handle_siginfo_read(ugdb, cmd);
	}

	return rc;
}

static void handle_command(struct ugdb *ugdb, char *cmd, int len)
{
	struct pbuf *pb = &ugdb->u_pbuf;
	const char *rc = "";

	switch (cmd[0]) {
	case '!':
	case '?':
		rc = "OK";
		break;

	case 'H':
		rc = handle_set_cur(ugdb, cmd + 1);
		break;

	case 'T':
		rc = handle_ck_alive(ugdb, cmd + 1);
		break;

	case 'D':
		rc = handle_detach(ugdb, cmd + 1);
		break;

	case 'g':
		rc = handle_get_all_regs(ugdb);
		break;

	case 'G':
		rc = handle_set_all_regs(ugdb, cmd, len);
		break;

	case 'p':
		rc = handle_get_reg(ugdb, cmd, len);
		break;

	case 'P':
		rc = handle_set_reg(ugdb, cmd, len);
		break;

	case 'm':
		rc = handle_readmem(ugdb, cmd);
		break;

	case 'M':
		rc = handle_writemem(ugdb, cmd, len);
		break;

	case 'C':
	case 'c':
	case 'S':
	case 's':
		rc = handle_c(ugdb, cmd);
		break;

	case 'z':
	case 'Z':
		rc = handle_z(ugdb, cmd, len);
		break;

	case 'q':
		if (EQ(cmd, "qSupported")) {
			if (!strstr(cmd, "multiprocess+")) {
				printk(KERN_INFO "ugdb: can't work without multiprocess\n");
				ugdb->u_err = -EPROTONOSUPPORT;
			}

			pb_packf(&ugdb->u_pbuf, "PacketSize=%x;%s",
				PACKET_SIZE,
				"QStartNoAckMode+;QNonStop+;multiprocess+;"
				"QPassSignals+;qXfer:siginfo:read+");
			rc = NULL;
		}
		else if (EQ(cmd, "qfThreadInfo")) {
			rc = handle_thread_info(ugdb, true);
		}
		else if (EQ(cmd, "qsThreadInfo")) {
			rc = handle_thread_info(ugdb, false);
		}
		else if (EQ(cmd, "qXfer:")) {
			rc = handle_qfer(ugdb, cmd);
		}
		else if (EQ(cmd, "qTStatus")) {
			rc = "T0";
		}

		break;

	case 'Q':
		if (EQ(cmd, "QStartNoAckMode")) {
			ugdb->u_no_ack = true;
			rc = "OK";
		}
		else if (EQ(cmd, "QNonStop:")) {
			if (*cmd != '1') {
				printk(KERN_INFO "ugdb: all-stop is not implemented.\n");
				ugdb->u_err = -EPROTONOSUPPORT;
			}

			rc = "OK";
		}
		else if (EQ(cmd, "QPassSignals:")) {
			rc = handle_qpass_signals(ugdb, cmd);
		}

		break;

	case 'v':
		if (EQ(cmd, "vAttach;")) {
			rc = handle_vattach(ugdb, cmd);
		}
		else if (EQ(cmd, "vStopped")) {
			rc = handle_vstopped(ugdb);
		}
		else if (EQ(cmd, "vCont")) {
			rc = handle_vcont(ugdb, cmd);
		}

		break;

	default:
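		/* unknown packet: rc stays "", an empty reply tells gdb the
		   command is not supported */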
		;
	}

	if (rc)
		pb_packs(pb, rc);
}

static void process_commands(struct ugdb *ugdb)
{
	char *cmds = ugdb->u_cbuf;
	int todo = ugdb->u_clen;
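
	/*
	 * Remote protocol framing: commands arrive as "$<data>#<xx>" packets
	 * (xx = two hex checksum digits), with bare '+'/'-' ack/nak bytes and
	 * 0x03 (interrupt) possibly interleaved between packets.
	 */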

	if (o_remote_debug)
		printk(KERN_INFO "=> %.*s\n", ugdb->u_clen, ugdb->u_cbuf);

	while (todo) {
		char first;
		char *c_cmd, *c_end;
		int c_len;

		first = *cmds++;
		todo--;

		switch (first) {
		default:
			printk(KERN_INFO "XXX: unknown chr %02x\n", first);
			pb_putc(&ugdb->u_pbuf, '-');
			break;

		case '-':
			printk(KERN_INFO "XXX: got NACK!\n");
			ugdb->u_err = -EPROTO;
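			/* fall through - the NAK is recorded, nothing else to do */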
		case '+':
			break;

		case 0x3:
			printk(KERN_INFO "XXX: unexpected CTRL-C\n");
			break;

		case '$':
			c_cmd = cmds;
			c_end = strnchr(c_cmd, todo, '#');
			c_len = c_end ? c_end - cmds : -1;

			if (c_len < 0 || todo < c_len + 3) {
				printk(KERN_INFO "XXX: can't find '#cs'\n");
				++todo;
				--cmds;
				goto out;
			}

			// XXX: verify checksum ?
			todo -= c_len + 3;
			cmds += c_len + 3;
			*c_end = 0;

			if (!ugdb->u_no_ack)
				pb_putc(&ugdb->u_pbuf, '+');

			handle_command(ugdb, c_cmd, c_len);
		}
	}
out:
	ugdb->u_clen = todo;
	if (todo && cmds > ugdb->u_cbuf)
		memmove(ugdb->u_cbuf, cmds, todo);
}

// -----------------------------------------------------------------------------
static int xxx_tinfo(struct ugdb *ugdb)
{
	struct ugdb_thread *thread;
	int tid = 0;

	mutex_lock(&ugdb->u_mutex);
	thread = ugdb_advance_tinfo(ugdb);
	if (thread)
		tid = thread->t_tid;
	mutex_unlock(&ugdb->u_mutex);

	return tid;
}

static int do_stop_thread(struct ugdb_thread *thread, void *arg)
{
	ugdb_stop_thread(thread, arg != NULL);
	return 0;
}

static int do_cont_thread(struct ugdb_thread *thread, void *arg)
{
	ugdb_cont_thread(thread, arg != NULL, false);
	return 0;
}

static int xxx_sc_threads(struct ugdb *ugdb, int tid, bool sc)
{
	void *arg = NULL;
	int pid = 0;
	int ret;

	if (tid < 0) {
		pid = -tid;
		tid = -1;
		arg = (void*)1;
	}

	mutex_lock(&ugdb->u_mutex);
	ret = ugdb_do_each_thread(ugdb, pid, tid,
				sc ? do_stop_thread : do_cont_thread,
				arg);
	mutex_unlock(&ugdb->u_mutex);

	return ret;
}

static int xxx_stop(struct ugdb *ugdb, int tid)
{
	return xxx_sc_threads(ugdb, tid, true);
}

static int xxx_cont(struct ugdb *ugdb, int tid)
{
	return xxx_sc_threads(ugdb, tid, false);
}

static int xxx_get_stopped(struct ugdb *ugdb)
{
	struct ugdb_thread *thread;
	int tid = 1;

	if (ugdb->u_stop_state == U_STOP_IDLE)
		return -1;

	if (ugdb->u_stop_state == U_STOP_PENDING)
		tid = 1000;

	thread = ugdb_next_stopped(ugdb);
	if (!thread)
		return 0;
	return tid * thread->t_tid;
}

static int xxx_show_all(struct ugdb *ugdb)
{
	struct ugdb_process *process;
	struct ugdb_thread *thread;

	printk(KERN_INFO "SHOW start ----------------------------------------\n");

	mutex_lock(&ugdb->u_mutex);
	list_for_each_entry(process, &ugdb->u_processes, p_processes) {
		printk(KERN_INFO "PROC: %x\n", process->p_pid);

		list_for_each_entry(thread, &process->p_threads, t_threads) {
			printk(KERN_INFO "    T: %x %p; %p %p\n",
					thread->t_tid, thread,
					thread->t_spid, pid_task(thread->t_spid, PIDTYPE_PID));
		}

	}
	mutex_unlock(&ugdb->u_mutex);

	printk(KERN_INFO "SHOW end ----------------------------------------\n");
	return 0;
}

static long ugdb_f_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct ugdb *ugdb = file->private_data;
	// XXX: otherwise gdb->get_tty_state(TCGETS, TCSETS, TCFLSH) complains
	int ret = 0;

	// XXX: temporary debugging hooks, ignore.
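	// e.g. from a (hypothetical) test program: ioctl(fd, 0x666 + 0, pid)
	// attaches to pid, 0x666 + 1 detaches, the others poke the xxx_* helpers.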
	switch (cmd) {
		case 0x666 + 0:
			ret = ugdb_attach(ugdb, arg);
			break;

		case 0x666 + 1:
			ret = ugdb_detach(ugdb, arg);
			break;

		case 0x666 + 2:
			ret = xxx_tinfo(ugdb);
			break;

		case 0x666 + 3:
			ret = xxx_stop(ugdb, arg);
			break;

		case 0x666 + 4:
			ret = xxx_cont(ugdb, arg);
			break;

		case 0x666 + 5:
			ret = xxx_get_stopped(ugdb);
			break;

		case 0x666 + 6:
			ret = xxx_show_all(ugdb);
			break;
	}

	return ret;
}

static unsigned int ugdb_f_poll(struct file *file, poll_table *wait)
{
	struct ugdb *ugdb = file->private_data;
	unsigned int mask;

	poll_wait(file, &ugdb->u_wait, wait);

	mask = (POLLOUT | POLLWRNORM);
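	/* POLLOUT is always reported; report POLLIN when reply data is queued
	   or a stop event is waiting to be turned into a notification */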

	if (pb_size(&ugdb->u_pbuf) || ugdb->u_stop_state == U_STOP_PENDING)
		mask |= (POLLIN | POLLRDNORM);

	if (ugdb->u_err)
		mask |= POLLERR;

	return mask;
}

static ssize_t ugdb_f_read(struct file *file, char __user *ubuf,
				size_t count, loff_t *ppos)
{
	struct ugdb *ugdb = file->private_data;
	struct pbuf *pb = &ugdb->u_pbuf;

	if (ugdb->u_err)
		return ugdb->u_err;

	if (ugdb->u_stop_state == U_STOP_PENDING)
		ugdb_report_stopped(ugdb, true);

	if (pb_size(pb) > count) {
		printk(KERN_INFO "XXX: short read %d %zu\n",
					pb_size(pb), count);
	}

	return pb_copy_to_user(pb, ubuf, count);
}

static ssize_t ugdb_f_write(struct file *file, const char __user *ubuf,
				size_t count, loff_t *ppos)
{
	struct ugdb *ugdb = file->private_data;

	if (ugdb->u_err)
		return ugdb->u_err;

	if (count > PACKET_SIZE - ugdb->u_clen) {
		printk(KERN_INFO "XXX: write(%zu,%d) enospc\n",
					count, ugdb->u_clen);
		return ugdb->u_err = -ENOSPC;
	}

	if (copy_from_user(ugdb->u_cbuf + ugdb->u_clen, ubuf, count))
		return -EFAULT;

	ugdb->u_clen += count;
	process_commands(ugdb);

	return count;
}

static int ugdb_f_open(struct inode *inode, struct file *file)
{
	nonseekable_open(inode, file);

	file->private_data = ugdb_create();

	return	IS_ERR(file->private_data) ?
		PTR_ERR(file->private_data) : 0;
}

static int ugdb_f_release(struct inode *inode, struct file *file)
{
	ugdb_destroy(file->private_data);

	return 0;
}

static const struct file_operations ugdb_f_ops = {
	.open			= ugdb_f_open,
	.unlocked_ioctl		= ugdb_f_ioctl,
	.poll			= ugdb_f_poll,
	.read			= ugdb_f_read,
	.write			= ugdb_f_write,
	.release		= ugdb_f_release,
};

#include <linux/kallsyms.h>

struct kallsyms_sym {
	const char	*name;
	unsigned long	addr;
};

static int kallsyms_on_each_symbol_cb(void *data, const char *name,
				struct module *mod, unsigned long addr)
{
	struct kallsyms_sym *sym = data;

	if (strcmp(name, sym->name))
		return 0;

	sym->addr = addr;
	return 1;
}

// XXX: kallsyms_lookup_name() is not exported in 2.6.32
static bool lookup_unexported(void)
{
	struct kallsyms_sym sym;

	sym.name = "access_process_vm";
	if (!kallsyms_on_each_symbol(kallsyms_on_each_symbol_cb, &sym))
		goto err;
	u_access_process_vm = (void*)sym.addr;

	sym.name = "arch_ptrace";
	if (!kallsyms_on_each_symbol(kallsyms_on_each_symbol_cb, &sym))
		goto err;
	u_arch_ptrace = (void*)sym.addr;

	return true;
err:
	printk(KERN_ERR "ugdb: can't lookup %s\n", sym.name);
	return false;
}

#define PROC_NAME	"ugdb"
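/* /proc/ugdb acts as the "serial port": gdb speaks the remote protocol over
 * plain read()/write() on it (presumably via "target remote /proc/ugdb"). */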
struct proc_dir_entry *ugdb_pde;

static int __init ugdb_init(void)
{
	if (!lookup_unexported())
		return -ESRCH;

	ugdb_pde = proc_create(PROC_NAME, S_IFREG|S_IRUGO|S_IWUGO,
				NULL, &ugdb_f_ops);
	if (!ugdb_pde)
		return -EBADF;

	return 0;
}

static void __exit ugdb_exit(void)
{
	remove_proc_entry(PROC_NAME, NULL);
}

MODULE_LICENSE("GPL");
module_init(ugdb_init);
module_exit(ugdb_exit);

end of thread, other threads:[~2011-02-23 17:24 UTC | newest]

Thread overview: 4+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2010-11-15 19:12 gdbstub initial code, v16 Oleg Nesterov
2010-11-16 20:02 ` Roland McGrath
     [not found] ` <20110215204148.GA17258@host1.dyn.jankratochvil.net>
     [not found]   ` <20110215215438.CBD0E1806E0@magilla.sf.frob.com>
     [not found]     ` <20110216214423.GA22228@redhat.com>
     [not found]       ` <20110216220541.55E701802A2@magilla.sf.frob.com>
     [not found]         ` <20110217211225.GA17768@redhat.com>
     [not found]           ` <20110221193927.122901814AE@magilla.sf.frob.com>
     [not found]             ` <20110222203834.GA6977@redhat.com>
2011-02-23 15:51               ` safe PTRACE_ATTACH Jan Kratochvil
2011-02-23 17:24                 ` Oleg Nesterov
