Diffstat (limited to 'fs/xfs/xfs_log.c')
 fs/xfs/xfs_log.c | 612
 1 file changed, 262 insertions(+), 350 deletions(-)
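The patch is easier to follow with its central data type in view. The diff below references struct xlog_grant_head, but the definition lives in fs/xfs/xfs_log_priv.h, which this file-limited view does not show. Judging from how the fields are used (head->lock, head->waiters, head->grant), it is presumably along these lines:

	/*
	 * Sketch of the consolidated grant head, reconstructed from its
	 * usage in this diff; the real definition is in xfs_log_priv.h
	 * and may differ in detail.
	 */
	struct xlog_grant_head {
		spinlock_t		lock;	/* protects the waiters list */
		struct list_head	waiters;	/* tickets sleeping for space */
		atomic64_t		grant;	/* packed cycle:bytes grant value */
	};

One instance each replaces the old l_grant_reserve_* and l_grant_write_* variables, which is what lets the duplicated reserve/write slow-path functions below collapse into a single set.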
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index e2cc3568c299..98a9cb5ffd17 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -67,15 +67,10 @@ STATIC void xlog_state_switch_iclogs(xlog_t		*log,
 				     int		eventual_size);
 STATIC void xlog_state_want_sync(xlog_t	*log, xlog_in_core_t *iclog);
 
-/* local functions to manipulate grant head */
-STATIC int  xlog_grant_log_space(xlog_t		*log,
-				 xlog_ticket_t	*xtic);
 STATIC void xlog_grant_push_ail(struct log	*log,
 				int		need_bytes);
 STATIC void xlog_regrant_reserve_log_space(xlog_t	 *log,
 					   xlog_ticket_t *ticket);
-STATIC int xlog_regrant_write_log_space(xlog_t		*log,
-					 xlog_ticket_t  *ticket);
 STATIC void xlog_ungrant_log_space(xlog_t	 *log,
 				   xlog_ticket_t *ticket);
 
@@ -150,78 +145,93 @@ xlog_grant_add_space(
 	} while (head_val != old);
 }
 
-STATIC bool
-xlog_reserveq_wake(
-	struct log		*log,
-	int			*free_bytes)
+STATIC void
+xlog_grant_head_init(
+	struct xlog_grant_head	*head)
+{
+	xlog_assign_grant_head(&head->grant, 1, 0);
+	INIT_LIST_HEAD(&head->waiters);
+	spin_lock_init(&head->lock);
+}
+
+STATIC void
+xlog_grant_head_wake_all(
+	struct xlog_grant_head	*head)
 {
 	struct xlog_ticket	*tic;
-	int			need_bytes;
 
-	list_for_each_entry(tic, &log->l_reserveq, t_queue) {
+	spin_lock(&head->lock);
+	list_for_each_entry(tic, &head->waiters, t_queue)
+		wake_up_process(tic->t_task);
+	spin_unlock(&head->lock);
+}
+
+static inline int
+xlog_ticket_reservation(
+	struct log		*log,
+	struct xlog_grant_head	*head,
+	struct xlog_ticket	*tic)
+{
+	if (head == &log->l_write_head) {
+		ASSERT(tic->t_flags & XLOG_TIC_PERM_RESERV);
+		return tic->t_unit_res;
+	} else {
 		if (tic->t_flags & XLOG_TIC_PERM_RESERV)
-			need_bytes = tic->t_unit_res * tic->t_cnt;
+			return tic->t_unit_res * tic->t_cnt;
 		else
-			need_bytes = tic->t_unit_res;
-
-		if (*free_bytes < need_bytes)
-			return false;
-		*free_bytes -= need_bytes;
-
-		trace_xfs_log_grant_wake_up(log, tic);
-		wake_up(&tic->t_wait);
+			return tic->t_unit_res;
 	}
-
-	return true;
 }
 
 STATIC bool
-xlog_writeq_wake(
+xlog_grant_head_wake(
 	struct log		*log,
+	struct xlog_grant_head	*head,
 	int			*free_bytes)
 {
 	struct xlog_ticket	*tic;
 	int			need_bytes;
 
-	list_for_each_entry(tic, &log->l_writeq, t_queue) {
-		ASSERT(tic->t_flags & XLOG_TIC_PERM_RESERV);
-
-		need_bytes = tic->t_unit_res;
-
+	list_for_each_entry(tic, &head->waiters, t_queue) {
+		need_bytes = xlog_ticket_reservation(log, head, tic);
 		if (*free_bytes < need_bytes)
 			return false;
-		*free_bytes -= need_bytes;
-		trace_xfs_log_regrant_write_wake_up(log, tic);
-		wake_up(&tic->t_wait);
+
+		*free_bytes -= need_bytes;
+		trace_xfs_log_grant_wake_up(log, tic);
+		wake_up_process(tic->t_task);
 	}
 
 	return true;
 }
 
 STATIC int
-xlog_reserveq_wait(
+xlog_grant_head_wait(
 	struct log		*log,
+	struct xlog_grant_head	*head,
 	struct xlog_ticket	*tic,
 	int			need_bytes)
 {
-	list_add_tail(&tic->t_queue, &log->l_reserveq);
+	list_add_tail(&tic->t_queue, &head->waiters);
 
 	do {
 		if (XLOG_FORCED_SHUTDOWN(log))
 			goto shutdown;
 		xlog_grant_push_ail(log, need_bytes);
 
+		__set_current_state(TASK_UNINTERRUPTIBLE);
+		spin_unlock(&head->lock);
+
 		XFS_STATS_INC(xs_sleep_logspace);
-		trace_xfs_log_grant_sleep(log, tic);
 
-		xlog_wait(&tic->t_wait, &log->l_grant_reserve_lock);
+		trace_xfs_log_grant_sleep(log, tic);
+		schedule();
 		trace_xfs_log_grant_wake(log, tic);
 
-		spin_lock(&log->l_grant_reserve_lock);
+		spin_lock(&head->lock);
 		if (XLOG_FORCED_SHUTDOWN(log))
 			goto shutdown;
-	} while (xlog_space_left(log, &log->l_grant_reserve_head) < need_bytes);
+	} while (xlog_space_left(log, &head->grant) < need_bytes);
 
 	list_del_init(&tic->t_queue);
 	return 0;
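Note how xlog_grant_head_wait() above replaces the old xlog_wait()-on-a-waitqueue call with a bare task wakeup: each ticket now records its owning task (t_task, set in xlog_ticket_alloc() further down), and wakers call wake_up_process() on it. This is the kernel's standard lost-wakeup-safe sleep loop. A minimal sketch of the pattern, with hypothetical names (my_lock, condition) standing in for the XFS ones:

	#include <linux/sched.h>
	#include <linux/spinlock.h>
	#include <linux/types.h>

	/*
	 * Sleeper side: mark ourselves TASK_UNINTERRUPTIBLE *before*
	 * dropping the lock.  A waker that runs wake_up_process() between
	 * our unlock and our schedule() simply sets us back to
	 * TASK_RUNNING, so schedule() returns immediately instead of
	 * sleeping forever.
	 */
	static void sleep_until(spinlock_t *my_lock, bool (*condition)(void))
	{
		spin_lock(my_lock);
		while (!condition()) {
			__set_current_state(TASK_UNINTERRUPTIBLE);
			spin_unlock(my_lock);
			schedule();		/* woken by wake_up_process() */
			spin_lock(my_lock);
		}
		spin_unlock(my_lock);
	}

Because the waker walks head->waiters while holding head->lock, and the sleeper sets its task state under that same lock, the wakeup can never fall into the window between the space test and the sleep.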
@@ -230,35 +240,58 @@ shutdown:
 	return XFS_ERROR(EIO);
 }
 
+/*
+ * Atomically get the log space required for a log ticket.
+ *
+ * Once a ticket gets put onto head->waiters, it will only return after the
+ * needed reservation is satisfied.
+ *
+ * This function is structured so that it has a lock free fast path. This is
+ * necessary because every new transaction reservation will come through this
+ * path. Hence any lock will be globally hot if we take it unconditionally on
+ * every pass.
+ *
+ * As tickets are only ever moved on and off head->waiters under head->lock, we
+ * only need to take that lock if we are going to add the ticket to the queue
+ * and sleep. We can avoid taking the lock if the ticket was never added to
+ * head->waiters because the t_queue list head will be empty and we hold the
+ * only reference to it so it can safely be checked unlocked.
+ */
 STATIC int
-xlog_writeq_wait(
+xlog_grant_head_check(
 	struct log		*log,
+	struct xlog_grant_head	*head,
 	struct xlog_ticket	*tic,
-	int			need_bytes)
+	int			*need_bytes)
 {
-	list_add_tail(&tic->t_queue, &log->l_writeq);
-
-	do {
-		if (XLOG_FORCED_SHUTDOWN(log))
-			goto shutdown;
-		xlog_grant_push_ail(log, need_bytes);
-
-		XFS_STATS_INC(xs_sleep_logspace);
-		trace_xfs_log_regrant_write_sleep(log, tic);
+	int			free_bytes;
+	int			error = 0;
 
-		xlog_wait(&tic->t_wait, &log->l_grant_write_lock);
-		trace_xfs_log_regrant_write_wake(log, tic);
+	ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY));
 
-		spin_lock(&log->l_grant_write_lock);
-		if (XLOG_FORCED_SHUTDOWN(log))
-			goto shutdown;
-	} while (xlog_space_left(log, &log->l_grant_write_head) < need_bytes);
+	/*
+	 * If there are other waiters on the queue then give them a chance at
+	 * logspace before us.  Wake up the first waiters, if we do not wake
+	 * up all the waiters then go to sleep waiting for more free space,
+	 * otherwise try to get some space for this transaction.
+	 */
+	*need_bytes = xlog_ticket_reservation(log, head, tic);
+	free_bytes = xlog_space_left(log, &head->grant);
+	if (!list_empty_careful(&head->waiters)) {
+		spin_lock(&head->lock);
+		if (!xlog_grant_head_wake(log, head, &free_bytes) ||
+		    free_bytes < *need_bytes) {
+			error = xlog_grant_head_wait(log, head, tic,
						     *need_bytes);
+		}
+		spin_unlock(&head->lock);
+	} else if (free_bytes < *need_bytes) {
+		spin_lock(&head->lock);
+		error = xlog_grant_head_wait(log, head, tic, *need_bytes);
+		spin_unlock(&head->lock);
+	}
 
-	list_del_init(&tic->t_queue);
-	return 0;
-shutdown:
-	list_del_init(&tic->t_queue);
-	return XFS_ERROR(EIO);
+	return error;
 }
 
 static void
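The comment block above documents the subtlest part of the patch. The control flow, reduced to its shape and reusing the struct sketched earlier (illustrative only, not the literal function):

	#include <linux/list.h>
	#include <linux/spinlock.h>

	/*
	 * Shape of xlog_grant_head_check()'s lock-free fast path.  Only
	 * the ticket's owner ever queues or dequeues its t_queue entry,
	 * and always under head->lock, which is what makes the unlocked
	 * list_empty_careful() test safe here.
	 */
	static int grant_head_check(struct xlog_grant_head *head,
				    int free_bytes, int need_bytes)
	{
		int error = 0;

		if (!list_empty_careful(&head->waiters)) {
			/* Slow path: wake earlier waiters, sleep if still short. */
			spin_lock(&head->lock);
			/* ... xlog_grant_head_wake() / xlog_grant_head_wait() ... */
			spin_unlock(&head->lock);
		} else if (free_bytes < need_bytes) {
			/* Nobody queued, but not enough space: lock and sleep. */
			spin_lock(&head->lock);
			/* ... xlog_grant_head_wait() ... */
			spin_unlock(&head->lock);
		}
		/* Common case: enough space, empty queue -- no lock taken. */
		return error;
	}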
@@ -286,6 +319,128 @@ xlog_tic_add_region(xlog_ticket_t *tic, uint len, uint type)
 }
 
 /*
+ * Replenish the byte reservation required by moving the grant write head.
+ */
+int
+xfs_log_regrant(
+	struct xfs_mount	*mp,
+	struct xlog_ticket	*tic)
+{
+	struct log		*log = mp->m_log;
+	int			need_bytes;
+	int			error = 0;
+
+	if (XLOG_FORCED_SHUTDOWN(log))
+		return XFS_ERROR(EIO);
+
+	XFS_STATS_INC(xs_try_logspace);
+
+	/*
+	 * This is a new transaction on the ticket, so we need to change the
+	 * transaction ID so that the next transaction has a different TID in
+	 * the log. Just add one to the existing tid so that we can see chains
+	 * of rolling transactions in the log easily.
+	 */
+	tic->t_tid++;
+
+	xlog_grant_push_ail(log, tic->t_unit_res);
+
+	tic->t_curr_res = tic->t_unit_res;
+	xlog_tic_reset_res(tic);
+
+	if (tic->t_cnt > 0)
+		return 0;
+
+	trace_xfs_log_regrant(log, tic);
+
+	error = xlog_grant_head_check(log, &log->l_write_head, tic,
+				      &need_bytes);
+	if (error)
+		goto out_error;
+
+	xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes);
+	trace_xfs_log_regrant_exit(log, tic);
+	xlog_verify_grant_tail(log);
+	return 0;
+
+out_error:
+	/*
+	 * If we are failing, make sure the ticket doesn't have any current
+	 * reservations.  We don't want to add this back when the ticket/
+	 * transaction gets cancelled.
+	 */
+	tic->t_curr_res = 0;
+	tic->t_cnt = 0;	/* ungrant will give back unit_res * t_cnt. */
+	return error;
+}
+
+/*
+ * Reserve log space and return a ticket corresponding the reservation.
+ *
+ * Each reservation is going to reserve extra space for a log record header.
+ * When writes happen to the on-disk log, we don't subtract the length of the
+ * log record header from any reservation.  By wasting space in each
+ * reservation, we prevent over allocation problems.
+ */
+int
+xfs_log_reserve(
+	struct xfs_mount	*mp,
+	int		 	unit_bytes,
+	int		 	cnt,
+	struct xlog_ticket	**ticp,
+	__uint8_t	 	client,
+	bool			permanent,
+	uint		 	t_type)
+{
+	struct log		*log = mp->m_log;
+	struct xlog_ticket	*tic;
+	int			need_bytes;
+	int			error = 0;
+
+	ASSERT(client == XFS_TRANSACTION || client == XFS_LOG);
+
+	if (XLOG_FORCED_SHUTDOWN(log))
+		return XFS_ERROR(EIO);
+
+	XFS_STATS_INC(xs_try_logspace);
+
+	ASSERT(*ticp == NULL);
+	tic = xlog_ticket_alloc(log, unit_bytes, cnt, client, permanent,
+				KM_SLEEP | KM_MAYFAIL);
+	if (!tic)
+		return XFS_ERROR(ENOMEM);
+
+	tic->t_trans_type = t_type;
+	*ticp = tic;
+
+	xlog_grant_push_ail(log, tic->t_unit_res * tic->t_cnt);
+
+	trace_xfs_log_reserve(log, tic);
+
+	error = xlog_grant_head_check(log, &log->l_reserve_head, tic,
+				      &need_bytes);
+	if (error)
+		goto out_error;
+
+	xlog_grant_add_space(log, &log->l_reserve_head.grant, need_bytes);
+	xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes);
+	trace_xfs_log_reserve_exit(log, tic);
+	xlog_verify_grant_tail(log);
+	return 0;
+
+out_error:
+	/*
+	 * If we are failing, make sure the ticket doesn't have any current
+	 * reservations.  We don't want to add this back when the ticket/
+	 * transaction gets cancelled.
+	 */
+	tic->t_curr_res = 0;
+	tic->t_cnt = 0;	/* ungrant will give back unit_res * t_cnt. */
+	return error;
+}
+
+
+/*
  * NOTES:
  *
  *	1. currblock field gets updated at startup and after in-core logs
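From the caller's side the split looks roughly like this: a permanent (rolling) transaction pays for all of its rolls once through xfs_log_reserve(), then each subsequent roll goes through the much cheaper xfs_log_regrant(). A hypothetical caller sketch; the flow only loosely mirrors what xfs_trans_reserve() and xfs_trans_roll() do, and the count is made up:

	struct xlog_ticket	*tic = NULL;
	int			error;

	/* Pay for two rolls up front: both grant heads move by the full
	 * prepaid reservation (t_unit_res * t_cnt). */
	error = xfs_log_reserve(mp, unit_bytes, 2, &tic, XFS_TRANSACTION,
				true /* permanent */, t_type);
	if (error)
		return error;

	/* ... first transaction in the chain commits ... */

	/*
	 * Roll the ticket.  While t_cnt > 0 this returns after bumping
	 * t_tid and refreshing t_curr_res, without touching the grant
	 * heads; once the prepaid count is used up, it instead waits for
	 * t_unit_res bytes of write-head space per roll.
	 */
	error = xfs_log_regrant(mp, tic);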
@@ -395,88 +550,6 @@ xfs_log_release_iclog(
 }
 
 /*
- *  1. Reserve an amount of on-disk log space and return a ticket corresponding
- *	to the reservation.
- *  2. Potentially, push buffers at tail of log to disk.
- *
- * Each reservation is going to reserve extra space for a log record header.
- * When writes happen to the on-disk log, we don't subtract the length of the
- * log record header from any reservation.  By wasting space in each
- * reservation, we prevent over allocation problems.
- */
-int
-xfs_log_reserve(
-	struct xfs_mount	*mp,
-	int		 	unit_bytes,
-	int		 	cnt,
-	struct xlog_ticket	**ticket,
-	__uint8_t	 	client,
-	uint		 	flags,
-	uint		 	t_type)
-{
-	struct log		*log = mp->m_log;
-	struct xlog_ticket	*internal_ticket;
-	int			retval = 0;
-
-	ASSERT(client == XFS_TRANSACTION || client == XFS_LOG);
-
-	if (XLOG_FORCED_SHUTDOWN(log))
-		return XFS_ERROR(EIO);
-
-	XFS_STATS_INC(xs_try_logspace);
-
-
-	if (*ticket != NULL) {
-		ASSERT(flags & XFS_LOG_PERM_RESERV);
-		internal_ticket = *ticket;
-
-		/*
-		 * this is a new transaction on the ticket, so we need to
-		 * change the transaction ID so that the next transaction has a
-		 * different TID in the log. Just add one to the existing tid
-		 * so that we can see chains of rolling transactions in the log
-		 * easily.
-		 */
-		internal_ticket->t_tid++;
-
-		trace_xfs_log_reserve(log, internal_ticket);
-
-		xlog_grant_push_ail(log, internal_ticket->t_unit_res);
-		retval = xlog_regrant_write_log_space(log, internal_ticket);
-	} else {
-		/* may sleep if need to allocate more tickets */
-		internal_ticket = xlog_ticket_alloc(log, unit_bytes, cnt,
-						  client, flags,
-						  KM_SLEEP|KM_MAYFAIL);
-		if (!internal_ticket)
-			return XFS_ERROR(ENOMEM);
-		internal_ticket->t_trans_type = t_type;
-		*ticket = internal_ticket;
-
-		trace_xfs_log_reserve(log, internal_ticket);
-
-		xlog_grant_push_ail(log,
-				    (internal_ticket->t_unit_res *
-				     internal_ticket->t_cnt));
-		retval = xlog_grant_log_space(log, internal_ticket);
-	}
-
-	if (unlikely(retval)) {
-		/*
-		 * If we are failing, make sure the ticket doesn't have any
-		 * current reservations.  We don't want to add this back
-		 * when the ticket/ transaction gets cancelled.
-		 */
-		internal_ticket->t_curr_res = 0;
-		/* ungrant will give back unit_res * t_cnt. */
-		internal_ticket->t_cnt = 0;
-	}
-
-	return retval;
-}
-
-
-/*
  * Mount a log filesystem
  *
  * mp		- ubiquitous xfs mount point structure
@@ -760,64 +833,35 @@ xfs_log_item_init(
 	INIT_LIST_HEAD(&item->li_cil);
 }
 
+/*
+ * Wake up processes waiting for log space after we have moved the log tail.
+ */
 void
-xfs_log_move_tail(xfs_mount_t	*mp,
-		  xfs_lsn_t	tail_lsn)
+xfs_log_space_wake(
+	struct xfs_mount	*mp)
 {
-	xlog_ticket_t	*tic;
-	xlog_t		*log = mp->m_log;
-	int		need_bytes, free_bytes;
+	struct log		*log = mp->m_log;
+	int			free_bytes;
 
 	if (XLOG_FORCED_SHUTDOWN(log))
 		return;
 
-	if (tail_lsn == 0)
-		tail_lsn = atomic64_read(&log->l_last_sync_lsn);
-
-	/* tail_lsn == 1 implies that we weren't passed a valid value.	*/
-	if (tail_lsn != 1)
-		atomic64_set(&log->l_tail_lsn, tail_lsn);
-
-	if (!list_empty_careful(&log->l_writeq)) {
-#ifdef DEBUG
-		if (log->l_flags & XLOG_ACTIVE_RECOVERY)
-			panic("Recovery problem");
-#endif
-		spin_lock(&log->l_grant_write_lock);
-		free_bytes = xlog_space_left(log, &log->l_grant_write_head);
-		list_for_each_entry(tic, &log->l_writeq, t_queue) {
-			ASSERT(tic->t_flags & XLOG_TIC_PERM_RESERV);
+	if (!list_empty_careful(&log->l_write_head.waiters)) {
+		ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY));
 
-			if (free_bytes < tic->t_unit_res && tail_lsn != 1)
-				break;
-			tail_lsn = 0;
-			free_bytes -= tic->t_unit_res;
-			trace_xfs_log_regrant_write_wake_up(log, tic);
-			wake_up(&tic->t_wait);
-		}
-		spin_unlock(&log->l_grant_write_lock);
+		spin_lock(&log->l_write_head.lock);
+		free_bytes = xlog_space_left(log, &log->l_write_head.grant);
+		xlog_grant_head_wake(log, &log->l_write_head, &free_bytes);
+		spin_unlock(&log->l_write_head.lock);
 	}
 
-	if (!list_empty_careful(&log->l_reserveq)) {
-#ifdef DEBUG
-		if (log->l_flags & XLOG_ACTIVE_RECOVERY)
-			panic("Recovery problem");
-#endif
-		spin_lock(&log->l_grant_reserve_lock);
-		free_bytes = xlog_space_left(log, &log->l_grant_reserve_head);
-		list_for_each_entry(tic, &log->l_reserveq, t_queue) {
-			if (tic->t_flags & XLOG_TIC_PERM_RESERV)
-				need_bytes = tic->t_unit_res*tic->t_cnt;
-			else
-				need_bytes = tic->t_unit_res;
-			if (free_bytes < need_bytes && tail_lsn != 1)
-				break;
-			tail_lsn = 0;
-			free_bytes -= need_bytes;
-			trace_xfs_log_grant_wake_up(log, tic);
-			wake_up(&tic->t_wait);
-		}
-		spin_unlock(&log->l_grant_reserve_lock);
+	if (!list_empty_careful(&log->l_reserve_head.waiters)) {
+		ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY));
+
+		spin_lock(&log->l_reserve_head.lock);
+		free_bytes = xlog_space_left(log, &log->l_reserve_head.grant);
+		xlog_grant_head_wake(log, &log->l_reserve_head, &free_bytes);
+		spin_unlock(&log->l_reserve_head.lock);
 	}
 }
 
@@ -867,21 +911,7 @@ xfs_log_need_covered(xfs_mount_t *mp)
 	return needed;
 }
 
-/******************************************************************************
- *
- *	local routines
- *
- ******************************************************************************
- */
-
-/* xfs_trans_tail_ail returns 0 when there is nothing in the list.
- * The log manager must keep track of the last LR which was committed
- * to disk.  The lsn of this LR will become the new tail_lsn whenever
- * xfs_trans_tail_ail returns 0.  If we don't do this, we run into
- * the situation where stuff could be written into the log but nothing
- * was ever in the AIL when asked.  Eventually, we panic since the
- * tail hits the head.
- *
+/*
  * We may be holding the log iclog lock upon entering this routine.
  */
 xfs_lsn_t
@@ -891,10 +921,17 @@ xlog_assign_tail_lsn(
 	xfs_lsn_t		tail_lsn;
 	struct log		*log = mp->m_log;
 
+	/*
+	 * To make sure we always have a valid LSN for the log tail we keep
+	 * track of the last LSN which was committed in log->l_last_sync_lsn,
+	 * and use that when the AIL was empty and xfs_ail_min_lsn returns 0.
+	 *
+	 * If the AIL has been emptied we also need to wake any process
+	 * waiting for this condition.
+	 */
 	tail_lsn = xfs_ail_min_lsn(mp->m_ail);
 	if (!tail_lsn)
 		tail_lsn = atomic64_read(&log->l_last_sync_lsn);
-
 	atomic64_set(&log->l_tail_lsn, tail_lsn);
 	return tail_lsn;
 }
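Throughout the patch, the tail LSN and both grant heads are single atomic64_t values, which is why they can be sampled without a lock (xlog_space_left() on the fast path above) and updated with compare-and-exchange. The convention, visible in helpers like xlog_assign_grant_head(&head->grant, 1, 0) and xlog_crack_grant_head(), is a cycle number in the high 32 bits and a block/byte count in the low 32 bits. A sketch of the packing, with hypothetical helper names (the real macros live in xfs_log_priv.h):

	#include <linux/types.h>

	/* Pack a (cycle, bytes) pair into one 64-bit grant-head value. */
	static inline int64_t grant_pack(int cycle, int bytes)
	{
		return ((int64_t)cycle << 32) | bytes;
	}

	/* Unpack it again; both halves are always read together atomically. */
	static inline void grant_unpack(int64_t val, int *cycle, int *bytes)
	{
		*cycle = val >> 32;
		*bytes = val & 0xffffffff;
	}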
@@ -1100,12 +1137,9 @@ xlog_alloc_log(xfs_mount_t	*mp,
 	xlog_assign_atomic_lsn(&log->l_tail_lsn, 1, 0);
 	xlog_assign_atomic_lsn(&log->l_last_sync_lsn, 1, 0);
 	log->l_curr_cycle  = 1;	    /* 0 is bad since this is initial value */
-	xlog_assign_grant_head(&log->l_grant_reserve_head, 1, 0);
-	xlog_assign_grant_head(&log->l_grant_write_head, 1, 0);
-	INIT_LIST_HEAD(&log->l_reserveq);
-	INIT_LIST_HEAD(&log->l_writeq);
-	spin_lock_init(&log->l_grant_reserve_lock);
-	spin_lock_init(&log->l_grant_write_lock);
+
+	xlog_grant_head_init(&log->l_reserve_head);
+	xlog_grant_head_init(&log->l_write_head);
 
 	error = EFSCORRUPTED;
 	if (xfs_sb_version_hassector(&mp->m_sb)) {
@@ -1280,7 +1314,7 @@ xlog_grant_push_ail(
 
 	ASSERT(BTOBB(need_bytes) < log->l_logBBsize);
 
-	free_bytes = xlog_space_left(log, &log->l_grant_reserve_head);
+	free_bytes = xlog_space_left(log, &log->l_reserve_head.grant);
 	free_blocks = BTOBBT(free_bytes);
 
 	/*
@@ -1412,8 +1446,8 @@ xlog_sync(xlog_t		*log,
 		 roundoff < BBTOB(1)));
 
 	/* move grant heads by roundoff in sync */
-	xlog_grant_add_space(log, &log->l_grant_reserve_head, roundoff);
-	xlog_grant_add_space(log, &log->l_grant_write_head, roundoff);
+	xlog_grant_add_space(log, &log->l_reserve_head.grant, roundoff);
+	xlog_grant_add_space(log, &log->l_write_head.grant, roundoff);
 
 	/* put cycle number in every block */
 	xlog_pack_data(log, iclog, roundoff);
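The "} while (head_val != old);" context line at the top of the earlier @@ -150 hunk is the tail of xlog_grant_add_space(), which the patch keeps: grant heads advance via a compare-and-exchange retry loop over the packed value, so neither reservation nor the roundoff adjustment in xlog_sync() above needs a lock. A reduced sketch, reusing the hypothetical pack/unpack helpers from the previous example and simplifying the wrap handling:

	#include <linux/atomic.h>

	/* Advance a packed grant head by "bytes", lock-free. */
	static void grant_add_space(atomic64_t *head, int bytes, int log_size)
	{
		int64_t head_val = atomic64_read(head);
		int64_t old, new;

		do {
			int cycle, space;

			old = head_val;
			grant_unpack(old, &cycle, &space);

			space += bytes;
			if (space >= log_size) {	/* wrapped past the end ...  */
				space -= log_size;	/* ... into the next cycle   */
				cycle++;
			}

			new = grant_pack(cycle, space);
			/* cmpxchg returns the value it found; retry on a race. */
			head_val = atomic64_cmpxchg(head, old, new);
		} while (head_val != old);
	}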
@@ -2566,119 +2600,6 @@ restart:
 	return 0;
 }	/* xlog_state_get_iclog_space */
 
-/*
- * Atomically get the log space required for a log ticket.
- *
- * Once a ticket gets put onto the reserveq, it will only return after the
- * needed reservation is satisfied.
- *
- * This function is structured so that it has a lock free fast path. This is
- * necessary because every new transaction reservation will come through this
- * path. Hence any lock will be globally hot if we take it unconditionally on
- * every pass.
- *
- * As tickets are only ever moved on and off the reserveq under the
- * l_grant_reserve_lock, we only need to take that lock if we are going to add
- * the ticket to the queue and sleep. We can avoid taking the lock if the ticket
- * was never added to the reserveq because the t_queue list head will be empty
- * and we hold the only reference to it so it can safely be checked unlocked.
- */
-STATIC int
-xlog_grant_log_space(
-	struct log		*log,
-	struct xlog_ticket	*tic)
-{
-	int			free_bytes, need_bytes;
-	int			error = 0;
-
-	ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY));
-
-	trace_xfs_log_grant_enter(log, tic);
-
-	/*
-	 * If there are other waiters on the queue then give them a chance at
-	 * logspace before us.  Wake up the first waiters, if we do not wake
-	 * up all the waiters then go to sleep waiting for more free space,
-	 * otherwise try to get some space for this transaction.
-	 */
-	need_bytes = tic->t_unit_res;
-	if (tic->t_flags & XFS_LOG_PERM_RESERV)
-		need_bytes *= tic->t_ocnt;
-	free_bytes = xlog_space_left(log, &log->l_grant_reserve_head);
-	if (!list_empty_careful(&log->l_reserveq)) {
-		spin_lock(&log->l_grant_reserve_lock);
-		if (!xlog_reserveq_wake(log, &free_bytes) ||
-		    free_bytes < need_bytes)
-			error = xlog_reserveq_wait(log, tic, need_bytes);
-		spin_unlock(&log->l_grant_reserve_lock);
-	} else if (free_bytes < need_bytes) {
-		spin_lock(&log->l_grant_reserve_lock);
-		error = xlog_reserveq_wait(log, tic, need_bytes);
-		spin_unlock(&log->l_grant_reserve_lock);
-	}
-	if (error)
-		return error;
-
-	xlog_grant_add_space(log, &log->l_grant_reserve_head, need_bytes);
-	xlog_grant_add_space(log, &log->l_grant_write_head, need_bytes);
-	trace_xfs_log_grant_exit(log, tic);
-	xlog_verify_grant_tail(log);
-	return 0;
-}
-
-/*
- * Replenish the byte reservation required by moving the grant write head.
- *
- * Similar to xlog_grant_log_space, the function is structured to have a lock
- * free fast path.
- */
-STATIC int
-xlog_regrant_write_log_space(
-	struct log		*log,
-	struct xlog_ticket	*tic)
-{
-	int			free_bytes, need_bytes;
-	int			error = 0;
-
-	tic->t_curr_res = tic->t_unit_res;
-	xlog_tic_reset_res(tic);
-
-	if (tic->t_cnt > 0)
-		return 0;
-
-	ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY));
-
-	trace_xfs_log_regrant_write_enter(log, tic);
-
-	/*
-	 * If there are other waiters on the queue then give them a chance at
-	 * logspace before us.  Wake up the first waiters, if we do not wake
-	 * up all the waiters then go to sleep waiting for more free space,
-	 * otherwise try to get some space for this transaction.
-	 */
-	need_bytes = tic->t_unit_res;
-	free_bytes = xlog_space_left(log, &log->l_grant_write_head);
-	if (!list_empty_careful(&log->l_writeq)) {
-		spin_lock(&log->l_grant_write_lock);
-		if (!xlog_writeq_wake(log, &free_bytes) ||
-		    free_bytes < need_bytes)
-			error = xlog_writeq_wait(log, tic, need_bytes);
-		spin_unlock(&log->l_grant_write_lock);
-	} else if (free_bytes < need_bytes) {
-		spin_lock(&log->l_grant_write_lock);
-		error = xlog_writeq_wait(log, tic, need_bytes);
-		spin_unlock(&log->l_grant_write_lock);
-	}
-
-	if (error)
-		return error;
-
-	xlog_grant_add_space(log, &log->l_grant_write_head, need_bytes);
-	trace_xfs_log_regrant_write_exit(log, tic);
-	xlog_verify_grant_tail(log);
-	return 0;
-}
-
 /* The first cnt-1 times through here we don't need to
  * move the grant write head because the permanent
  * reservation has reserved cnt times the unit amount.
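The retained comment above ("the first cnt-1 times through here...") is easiest to see with numbers. Assuming a hypothetical permanent ticket with t_unit_res of 100 KiB and t_cnt of 3:

	/*
	 * Made-up figures for a permanent ticket; illustration only.
	 */
	int unit_res = 100 * 1024;	/* bytes reserved per transaction roll */
	int cnt      = 3;		/* rolls paid for at reservation time  */

	/* xfs_log_reserve() moves both grant heads by the full prepayment: */
	int prepaid  = unit_res * cnt;	/* 300 KiB */

	/*
	 * The first cnt-1 commits only decrement t_cnt, so xfs_log_regrant()
	 * sees t_cnt > 0 and returns without touching the grant heads.  Only
	 * after the prepaid rolls are consumed does each further roll have
	 * to win another unit_res (100 KiB) of write-head space.
	 */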
@@ -2695,9 +2616,9 @@ xlog_regrant_reserve_log_space(xlog_t	     *log,
 	if (ticket->t_cnt > 0)
 		ticket->t_cnt--;
 
-	xlog_grant_sub_space(log, &log->l_grant_reserve_head,
+	xlog_grant_sub_space(log, &log->l_reserve_head.grant,
 					ticket->t_curr_res);
-	xlog_grant_sub_space(log, &log->l_grant_write_head,
+	xlog_grant_sub_space(log, &log->l_write_head.grant,
 					ticket->t_curr_res);
 	ticket->t_curr_res = ticket->t_unit_res;
 	xlog_tic_reset_res(ticket);
@@ -2708,7 +2629,7 @@ xlog_regrant_reserve_log_space(xlog_t	     *log,
 	if (ticket->t_cnt > 0)
 		return;
 
-	xlog_grant_add_space(log, &log->l_grant_reserve_head,
+	xlog_grant_add_space(log, &log->l_reserve_head.grant,
 					ticket->t_unit_res);
 
 	trace_xfs_log_regrant_reserve_exit(log, ticket);
@@ -2754,14 +2675,13 @@ xlog_ungrant_log_space(xlog_t	     *log,
 		bytes += ticket->t_unit_res*ticket->t_cnt;
 	}
 
-	xlog_grant_sub_space(log, &log->l_grant_reserve_head, bytes);
-	xlog_grant_sub_space(log, &log->l_grant_write_head, bytes);
+	xlog_grant_sub_space(log, &log->l_reserve_head.grant, bytes);
+	xlog_grant_sub_space(log, &log->l_write_head.grant, bytes);
 
 	trace_xfs_log_ungrant_exit(log, ticket);
 
-	xfs_log_move_tail(log->l_mp, 1);
-}	/* xlog_ungrant_log_space */
-
+	xfs_log_space_wake(log->l_mp);
+}
 
 /*
  * Flush iclog to disk if this is the last reference to the given iclog and
@@ -3219,7 +3139,7 @@ xlog_ticket_alloc(
 	int		unit_bytes,
 	int		cnt,
 	char		client,
-	uint		xflags,
+	bool		permanent,
 	int		alloc_flags)
 {
 	struct xlog_ticket *tic;
@@ -3313,6 +3233,7 @@ xlog_ticket_alloc(
         }
 
 	atomic_set(&tic->t_ref, 1);
+	tic->t_task		= current;
 	INIT_LIST_HEAD(&tic->t_queue);
 	tic->t_unit_res		= unit_bytes;
 	tic->t_curr_res		= unit_bytes;
@@ -3322,9 +3243,8 @@ xlog_ticket_alloc(
 	tic->t_clientid		= client;
 	tic->t_flags		= XLOG_TIC_INITED;
 	tic->t_trans_type	= 0;
-	if (xflags & XFS_LOG_PERM_RESERV)
+	if (permanent)
 		tic->t_flags |= XLOG_TIC_PERM_RESERV;
-	init_waitqueue_head(&tic->t_wait);
 
 	xlog_tic_reset_res(tic);
 
@@ -3380,7 +3300,7 @@ xlog_verify_grant_tail(
 	int		tail_cycle, tail_blocks;
 	int		cycle, space;
 
-	xlog_crack_grant_head(&log->l_grant_write_head, &cycle, &space);
+	xlog_crack_grant_head(&log->l_write_head.grant, &cycle, &space);
 	xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_blocks);
 	if (tail_cycle != cycle) {
 		if (cycle - 1 != tail_cycle &&
@@ -3582,7 +3502,6 @@ xfs_log_force_umount(
 	struct xfs_mount	*mp,
 	int			logerror)
 {
-	xlog_ticket_t	*tic;
 	xlog_t		*log;
 	int		retval;
 
@@ -3650,15 +3569,8 @@ xfs_log_force_umount(
 	 * we don't enqueue anything once the SHUTDOWN flag is set, and this
 	 * action is protected by the grant locks.
 	 */
-	spin_lock(&log->l_grant_reserve_lock);
-	list_for_each_entry(tic, &log->l_reserveq, t_queue)
-		wake_up(&tic->t_wait);
-	spin_unlock(&log->l_grant_reserve_lock);
-
-	spin_lock(&log->l_grant_write_lock);
-	list_for_each_entry(tic, &log->l_writeq, t_queue)
-		wake_up(&tic->t_wait);
-	spin_unlock(&log->l_grant_write_lock);
+	xlog_grant_head_wake_all(&log->l_reserve_head);
+	xlog_grant_head_wake_all(&log->l_write_head);
 
 	if (!(log->l_iclog->ic_state & XLOG_STATE_IOERROR)) {
 		ASSERT(!logerror);
