@@ -48,6 +48,7 @@ struct wireguard_peer *peer_create(struct wireguard_device *wg, const u8 public_
48
48
spin_lock_init (& peer -> keypairs .keypair_update_lock );
49
49
INIT_WORK (& peer -> transmit_handshake_work , packet_handshake_send_worker );
50
50
rwlock_init (& peer -> endpoint_lock );
51
+ atomic_set (& peer -> dead_count , 1 );
51
52
kref_init (& peer -> refcount );
52
53
skb_queue_head_init (& peer -> staged_packet_queue );
53
54
peer -> last_sent_handshake = ktime_get_boot_fast_ns () - (u64 )(REKEY_TIMEOUT + 1 ) * NSEC_PER_SEC ;
/* Removes @peer from the device's lookup structures and tears down all of its
 * runtime machinery (crypto queues, napi, workqueues, timers), then drops the
 * reference held by the peer list. Must be called from process context with
 * the device_update_lock held (asserted below). A NULL @peer is a no-op.
 * The final memory release happens later via kref/RCU, not here.
 */
void peer_remove(struct wireguard_peer *peer)
{
	if (unlikely(!peer))
		return;
	lockdep_assert_held(&peer->device->device_update_lock);

	/* Remove from configuration-time lookup structures so new packets can't enter. */
	list_del_init(&peer->peer_list);
	allowedips_remove_by_peer(&peer->device->peer_allowedips, peer, &peer->device->device_update_lock);
	pubkey_hashtable_remove(&peer->device->peer_hashtable, peer);

	/* Mark as dead, so that we don't allow jumping contexts after.
	 * NOTE(review): this spins until the 1 -> 0 transition succeeds; presumably
	 * other contexts bump dead_count while "jumping" — confirm against the
	 * dead_count users elsewhere in the tree.
	 */
	while (atomic_cmpxchg(&peer->dead_count, 1, 0) != 1)
		cpu_relax();

	/* The transition between packet encryption/decryption queues isn't guarded
	 * by the dead_count, but each reference's life is strictly bounded by
	 * two generations: once for parallel crypto and once for serial ingestion,
	 * so we can simply flush twice, and be sure that we no longer have references
	 * inside these queues.
	 */

	/* The first flush is for encrypt/decrypt. */
	flush_workqueue(peer->device->packet_crypt_wq);
	/* The second.1 flush is for send (but not receive, since that's napi). */
	flush_workqueue(peer->device->packet_crypt_wq);
	/* The second.2 flush is for receive (but not send, since that's wq). */
	napi_disable(&peer->napi);
	/* It's now safe to remove the napi struct, which must be done here from process context. */
	netif_napi_del(&peer->napi);
	/* Ensure any workstructs we own (like transmit_handshake_work or clear_peer_work) no longer are in use. */
	flush_workqueue(peer->device->handshake_send_wq);

	/* Remove keys and handshakes from memory. Handshake removal must be done here from process context. */
	noise_handshake_clear(&peer->handshake);
	noise_keypairs_clear(&peer->keypairs);

	/* Destroy all ongoing timers that were in-flight at the beginning of this function. */
	timers_stop(peer);

	/* Accounting, then drop the peer-list's reference; kref_release runs when the
	 * last holder lets go.
	 */
	--peer->device->num_peers;
	peer_put(peer);
}
104
127
105
128
static void rcu_release (struct rcu_head * rcu )
106
129
{
107
130
struct wireguard_peer * peer = container_of (rcu , struct wireguard_peer , rcu );
108
-
109
- pr_debug ("%s: Peer %llu (%pISpfsc) destroyed\n" , peer -> device -> dev -> name , peer -> internal_id , & peer -> endpoint .addr );
110
131
dst_cache_destroy (& peer -> endpoint_cache );
111
132
packet_queue_free (& peer -> rx_queue , false);
112
133
packet_queue_free (& peer -> tx_queue , false);
@@ -116,9 +137,12 @@ static void rcu_release(struct rcu_head *rcu)
116
137
/* kref callback invoked when the last reference to @peer is dropped. Runs the
 * parts of teardown that require no references to remain: removes the peer
 * from the index hashtable, purges packets that never got transmitted, and
 * defers the actual memory release to rcu_release via call_rcu_bh.
 */
static void kref_release(struct kref *refcount)
{
	struct wireguard_peer *peer = container_of(refcount, struct wireguard_peer, refcount);

	pr_debug("%s: Peer %llu (%pISpfsc) destroyed\n", peer->device->dev->name, peer->internal_id, &peer->endpoint.addr);
	/* Remove ourself from dynamic runtime lookup structures, now that the last reference is gone. */
	index_hashtable_remove(&peer->device->index_hashtable, &peer->handshake.entry);
	/* Remove any lingering packets that didn't have a chance to be transmitted. */
	skb_queue_purge(&peer->staged_packet_queue);
	/* Free the memory used — rcu_release runs after an RCU-bh grace period. */
	call_rcu_bh(&peer->rcu, rcu_release);
}
124
148
0 commit comments