Mirror of https://kernel.googlesource.com/pub/scm/linux/kernel/git/torvalds/linux
synced 2025-11-01 03:23:39 +10:00
mptcp: track some aggregate data counters

Currently there are no data transfer counters accounting for all the
subflows used by a given MPTCP socket. User space can compute such
figures by aggregating the per-subflow info, but that is inaccurate if
any subflow is closed before the MPTCP socket itself.

Add the new counters in the MPTCP socket itself and expose them via the
existing diag and sockopt interfaces. While touching
mptcp_diag_fill_info(), acquire the relevant locks before fetching the
msk data, to ensure better data consistency.

Closes: https://github.com/multipath-tcp/mptcp_net-next/issues/385
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Reviewed-by: Matthieu Baerts <matthieu.baerts@tessares.net>
Signed-off-by: Matthieu Baerts <matthieu.baerts@tessares.net>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
parent c026d33b8f
commit 38967f424b
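As a rough illustration (not part of this commit), user space could read the new aggregate counters through the existing MPTCP_INFO socket option once the uapi header carries the mptcpi_bytes_* fields added below. The helper name dump_mptcp_counters() and the SOL_MPTCP fallback define are assumptions for the sketch; the socket is assumed to have been created with IPPROTO_MPTCP:

/* Sketch only: assumes a uapi <linux/mptcp.h> that already carries the
 * mptcpi_bytes_* fields added by this patch, and a connected socket
 * created with IPPROTO_MPTCP.
 */
#include <linux/mptcp.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

#ifndef SOL_MPTCP
#define SOL_MPTCP 284		/* not exported by older libc headers */
#endif

/* hypothetical helper, not part of the patch */
static int dump_mptcp_counters(int fd)
{
	struct mptcp_info info;
	socklen_t len = sizeof(info);

	memset(&info, 0, sizeof(info));
	if (getsockopt(fd, SOL_MPTCP, MPTCP_INFO, &info, &len) < 0) {
		perror("getsockopt(MPTCP_INFO)");
		return -1;
	}

	/* MPTCP-level aggregates, valid even after individual subflows close */
	printf("sent=%llu received=%llu acked=%llu retrans=%llu (retransmits=%u)\n",
	       (unsigned long long)info.mptcpi_bytes_sent,
	       (unsigned long long)info.mptcpi_bytes_received,
	       (unsigned long long)info.mptcpi_bytes_acked,
	       (unsigned long long)info.mptcpi_bytes_retrans,
	       info.mptcpi_retransmits);
	return 0;
}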
--- a/include/uapi/linux/mptcp.h
+++ b/include/uapi/linux/mptcp.h
@@ -123,6 +123,11 @@ struct mptcp_info {
 	__u8	mptcpi_local_addr_used;
 	__u8	mptcpi_local_addr_max;
 	__u8	mptcpi_csum_enabled;
+	__u32	mptcpi_retransmits;
+	__u64	mptcpi_bytes_retrans;
+	__u64	mptcpi_bytes_sent;
+	__u64	mptcpi_bytes_received;
+	__u64	mptcpi_bytes_acked;
 };
 
 /*
--- a/net/mptcp/options.c
+++ b/net/mptcp/options.c
@@ -1026,6 +1026,12 @@ u64 __mptcp_expand_seq(u64 old_seq, u64 cur_seq)
 	return cur_seq;
 }
 
+static void __mptcp_snd_una_update(struct mptcp_sock *msk, u64 new_snd_una)
+{
+	msk->bytes_acked += new_snd_una - msk->snd_una;
+	msk->snd_una = new_snd_una;
+}
+
 static void ack_update_msk(struct mptcp_sock *msk,
 			   struct sock *ssk,
 			   struct mptcp_options_received *mp_opt)
@@ -1057,7 +1063,7 @@ static void ack_update_msk(struct mptcp_sock *msk,
 		__mptcp_check_push(sk, ssk);
 
 	if (after64(new_snd_una, old_snd_una)) {
-		msk->snd_una = new_snd_una;
+		__mptcp_snd_una_update(msk, new_snd_una);
 		__mptcp_data_acked(sk);
 	}
 	mptcp_data_unlock(sk);
@@ -1123,7 +1129,7 @@ bool mptcp_incoming_options(struct sock *sk, struct sk_buff *skb)
 		/* on fallback we just need to ignore the msk-level snd_una, as
 		 * this is really plain TCP
 		 */
-		msk->snd_una = READ_ONCE(msk->snd_nxt);
+		__mptcp_snd_una_update(msk, READ_ONCE(msk->snd_nxt));
 
 		__mptcp_data_acked(subflow->conn);
 		mptcp_data_unlock(subflow->conn);
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -377,6 +377,7 @@ static bool __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk,
 
 	if (MPTCP_SKB_CB(skb)->map_seq == msk->ack_seq) {
 		/* in sequence */
+		msk->bytes_received += copy_len;
 		WRITE_ONCE(msk->ack_seq, msk->ack_seq + copy_len);
 		tail = skb_peek_tail(&sk->sk_receive_queue);
 		if (tail && mptcp_try_coalesce(sk, tail, skb))
@@ -760,6 +761,7 @@ static bool __mptcp_ofo_queue(struct mptcp_sock *msk)
 			MPTCP_SKB_CB(skb)->map_seq += delta;
 			__skb_queue_tail(&sk->sk_receive_queue, skb);
 		}
+		msk->bytes_received += end_seq - msk->ack_seq;
 		msk->ack_seq = end_seq;
 		moved = true;
 	}
@@ -1531,8 +1533,10 @@ static void mptcp_update_post_push(struct mptcp_sock *msk,
 	 * that has been handed to the subflow for transmission
 	 * and skip update in case it was old dfrag.
 	 */
-	if (likely(after64(snd_nxt_new, msk->snd_nxt)))
+	if (likely(after64(snd_nxt_new, msk->snd_nxt))) {
+		msk->bytes_sent += snd_nxt_new - msk->snd_nxt;
 		msk->snd_nxt = snd_nxt_new;
+	}
 }
 
 void mptcp_check_and_set_pending(struct sock *sk)
@@ -2590,6 +2594,7 @@ static void __mptcp_retrans(struct sock *sk)
 	}
 	if (copied) {
 		dfrag->already_sent = max(dfrag->already_sent, info.sent);
+		msk->bytes_retrans += copied;
 		tcp_push(ssk, 0, info.mss_now, tcp_sk(ssk)->nonagle,
 			 info.size_goal);
 		WRITE_ONCE(msk->allow_infinite_fallback, false);
@@ -3102,6 +3107,10 @@ static int mptcp_disconnect(struct sock *sk, int flags)
 	WRITE_ONCE(msk->csum_enabled, mptcp_is_checksum_enabled(sock_net(sk)));
 	mptcp_pm_data_reset(msk);
 	mptcp_ca_reset(sk);
+	msk->bytes_acked = 0;
+	msk->bytes_received = 0;
+	msk->bytes_sent = 0;
+	msk->bytes_retrans = 0;
 
 	WRITE_ONCE(sk->sk_shutdown, 0);
 	sk_error_report(sk);
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -262,10 +262,13 @@ struct mptcp_sock {
 	u64		local_key;
 	u64		remote_key;
 	u64		write_seq;
+	u64		bytes_sent;
 	u64		snd_nxt;
+	u64		bytes_received;
 	u64		ack_seq;
 	atomic64_t	rcv_wnd_sent;
 	u64		rcv_data_fin_seq;
+	u64		bytes_retrans;
 	int		rmem_fwd_alloc;
 	struct sock	*last_snd;
 	int		snd_burst;
@@ -274,6 +277,7 @@ struct mptcp_sock {
 						 * recovery related fields are under data_lock
 						 * protection
 						 */
+	u64		bytes_acked;
 	u64		snd_una;
 	u64		wnd_end;
 	unsigned long	timer_ival;
--- a/net/mptcp/sockopt.c
+++ b/net/mptcp/sockopt.c
@@ -889,7 +889,9 @@ out:
 
 void mptcp_diag_fill_info(struct mptcp_sock *msk, struct mptcp_info *info)
 {
+	struct sock *sk = (struct sock *)msk;
 	u32 flags = 0;
+	bool slow;
 
 	memset(info, 0, sizeof(*info));
 
@@ -898,6 +900,9 @@ void mptcp_diag_fill_info(struct mptcp_sock *msk, struct mptcp_info *info)
 	info->mptcpi_add_addr_accepted = READ_ONCE(msk->pm.add_addr_accepted);
 	info->mptcpi_local_addr_used = READ_ONCE(msk->pm.local_addr_used);
 
+	if (inet_sk_state_load(sk) == TCP_LISTEN)
+		return;
+
 	/* The following limits only make sense for the in-kernel PM */
 	if (mptcp_pm_is_kernel(msk)) {
 		info->mptcpi_subflows_max =
@@ -915,11 +920,21 @@ void mptcp_diag_fill_info(struct mptcp_sock *msk, struct mptcp_info *info)
 	if (READ_ONCE(msk->can_ack))
 		flags |= MPTCP_INFO_FLAG_REMOTE_KEY_RECEIVED;
 	info->mptcpi_flags = flags;
-	info->mptcpi_token = READ_ONCE(msk->token);
-	info->mptcpi_write_seq = READ_ONCE(msk->write_seq);
-	info->mptcpi_snd_una = READ_ONCE(msk->snd_una);
-	info->mptcpi_rcv_nxt = READ_ONCE(msk->ack_seq);
-	info->mptcpi_csum_enabled = READ_ONCE(msk->csum_enabled);
+	mptcp_data_lock(sk);
+	info->mptcpi_snd_una = msk->snd_una;
+	info->mptcpi_rcv_nxt = msk->ack_seq;
+	info->mptcpi_bytes_acked = msk->bytes_acked;
+	mptcp_data_unlock(sk);
+
+	slow = lock_sock_fast(sk);
+	info->mptcpi_csum_enabled = msk->csum_enabled;
+	info->mptcpi_token = msk->token;
+	info->mptcpi_write_seq = msk->write_seq;
+	info->mptcpi_retransmits = inet_csk(sk)->icsk_retransmits;
+	info->mptcpi_bytes_sent = msk->bytes_sent;
+	info->mptcpi_bytes_received = msk->bytes_received;
+	info->mptcpi_bytes_retrans = msk->bytes_retrans;
+	unlock_sock_fast(sk, slow);
 }
 EXPORT_SYMBOL_GPL(mptcp_diag_fill_info);
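Purely as a hypothetical consumer-side sketch (neither helper exists in the tree), a monitoring tool that has already obtained a filled struct mptcp_info, via getsockopt() or the diag interface, could derive a couple of aggregate figures from the new counters without walking the possibly already-closed subflows; mptcp_unacked_bytes() and mptcp_retrans_ratio() are names invented here:

#include <linux/mptcp.h>

/* bytes handed to the subflows but not yet acked at the MPTCP level:
 * mptcpi_bytes_sent accumulates snd_nxt advances and mptcpi_bytes_acked
 * accumulates snd_una advances, so their difference roughly mirrors the
 * amount of data currently in flight.
 */
static inline unsigned long long mptcp_unacked_bytes(const struct mptcp_info *i)
{
	return i->mptcpi_bytes_sent - i->mptcpi_bytes_acked;
}

/* share of MPTCP-level re-injected bytes over the originally sent ones */
static inline double mptcp_retrans_ratio(const struct mptcp_info *i)
{
	if (!i->mptcpi_bytes_sent)
		return 0.0;
	return (double)i->mptcpi_bytes_retrans / (double)i->mptcpi_bytes_sent;
}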