Diffstat (limited to 'net/unix')
-rw-r--r--  net/unix/af_unix.c  | 105
-rw-r--r--  net/unix/diag.c     |  20
-rw-r--r--  net/unix/garbage.c  |  20
-rw-r--r--  net/unix/unix_bpf.c |   8
4 files changed, 100 insertions(+), 53 deletions(-)
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index bf338b782fc4..f0c2293f1d3b 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -569,12 +569,6 @@ static void unix_sock_destructor(struct sock *sk)
skb_queue_purge(&sk->sk_receive_queue);
-#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
- if (u->oob_skb) {
- kfree_skb(u->oob_skb);
- u->oob_skb = NULL;
- }
-#endif
DEBUG_NET_WARN_ON_ONCE(refcount_read(&sk->sk_wmem_alloc));
DEBUG_NET_WARN_ON_ONCE(!sk_unhashed(sk));
DEBUG_NET_WARN_ON_ONCE(sk->sk_socket);
@@ -620,6 +614,13 @@ static void unix_release_sock(struct sock *sk, int embrion)
unix_state_unlock(sk);
+#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
+ if (u->oob_skb) {
+ kfree_skb(u->oob_skb);
+ u->oob_skb = NULL;
+ }
+#endif
+
wake_up_interruptible_all(&u->peer_wait);
if (skpair != NULL) {
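
The two hunks above move the MSG_OOB skb cleanup from unix_sock_destructor() to unix_release_sock(). The apparent motivation (hedged; the diff carries no commit message): the queued u->oob_skb is charged to the socket and so pins a reference on it, meaning a destructor-time free can never run and the whole sk leaks. A minimal userspace sketch of the scenario, assuming a kernel built with CONFIG_AF_UNIX_OOB:

/* Send one OOB byte over a stream pair, then close both ends without
 * reading it; the still-queued OOB skb is what unix_release_sock()
 * now frees. */
#include <assert.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int sk[2];

	assert(socketpair(AF_UNIX, SOCK_STREAM, 0, sk) == 0);
	assert(send(sk[0], "x", 1, MSG_OOB) == 1);
	close(sk[0]);
	close(sk[1]);
	return 0;
}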
@@ -785,15 +786,45 @@ static int unix_set_peek_off(struct sock *sk, int val)
}
#ifdef CONFIG_PROC_FS
+static int unix_count_nr_fds(struct sock *sk)
+{
+ struct sk_buff *skb;
+ struct unix_sock *u;
+ int nr_fds = 0;
+
+ spin_lock(&sk->sk_receive_queue.lock);
+ skb = skb_peek(&sk->sk_receive_queue);
+ while (skb) {
+ u = unix_sk(skb->sk);
+ nr_fds += atomic_read(&u->scm_stat.nr_fds);
+ skb = skb_peek_next(skb, &sk->sk_receive_queue);
+ }
+ spin_unlock(&sk->sk_receive_queue.lock);
+
+ return nr_fds;
+}
+
static void unix_show_fdinfo(struct seq_file *m, struct socket *sock)
{
struct sock *sk = sock->sk;
struct unix_sock *u;
+ int nr_fds;
if (sk) {
- u = unix_sk(sock->sk);
- seq_printf(m, "scm_fds: %u\n",
- atomic_read(&u->scm_stat.nr_fds));
+ u = unix_sk(sk);
+ if (sock->type == SOCK_DGRAM) {
+ nr_fds = atomic_read(&u->scm_stat.nr_fds);
+ goto out_print;
+ }
+
+ unix_state_lock(sk);
+ if (sk->sk_state != TCP_LISTEN)
+ nr_fds = atomic_read(&u->scm_stat.nr_fds);
+ else
+ nr_fds = unix_count_nr_fds(sk);
+ unix_state_unlock(sk);
+out_print:
+ seq_printf(m, "scm_fds: %u\n", nr_fds);
}
}
#else
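
The hunk above special-cases listening sockets in unix_show_fdinfo(): a TCP_LISTEN unix socket's receive queue holds embryo connections rather than data, so the in-flight fd count is now summed over unix_sk(skb->sk)->scm_stat.nr_fds of every queued skb, under the receive-queue lock; SOCK_DGRAM and established sockets keep the single atomic_read(). A small userspace sketch (an illustrative helper, not part of the patch) of where the result surfaces:

/* Print the "scm_fds:" line that unix_show_fdinfo() emits for an
 * open socket fd, via procfs. */
#include <stdio.h>
#include <string.h>

static void print_scm_fds(int fd)
{
	char path[64], line[128];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", fd);
	f = fopen(path, "r");
	if (!f)
		return;
	while (fgets(line, sizeof(line), f))
		if (!strncmp(line, "scm_fds:", 8))
			fputs(line, stdout);
	fclose(f);
}

int main(void)
{
	print_scm_fds(0);	/* any open fd works; non-sockets have no scm_fds line */
	return 0;
}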
@@ -1116,7 +1147,7 @@ static int unix_autobind(struct sock *sk)
addr->name->sun_family = AF_UNIX;
refcount_set(&addr->refcnt, 1);
- ordernum = prandom_u32();
+ ordernum = get_random_u32();
lastnum = ordernum & 0xFFFFF;
retry:
ordernum = (ordernum + 1) & 0xFFFFF;
@@ -1968,13 +1999,20 @@ restart_locked:
unix_state_lock(sk);
err = 0;
- if (unix_peer(sk) == other) {
+ if (sk->sk_type == SOCK_SEQPACKET) {
+ /* We get here only when racing with unix_release_sock(),
+ * which is clearing @other. Unlike the SOCK_DGRAM case,
+ * never change the state to TCP_CLOSE.
+ */
+ unix_state_unlock(sk);
+ err = -EPIPE;
+ } else if (unix_peer(sk) == other) {
unix_peer(sk) = NULL;
unix_dgram_peer_wake_disconnect_wakeup(sk, other);
+ sk->sk_state = TCP_CLOSE;
unix_state_unlock(sk);
- sk->sk_state = TCP_CLOSE;
unix_dgram_disconnected(sk, other);
sock_put(other);
err = -ECONNREFUSED;
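
Two things change in the hunk above: a SOCK_SEQPACKET sender racing with unix_release_sock() tearing down @other now just gets -EPIPE without touching its own state, and for SOCK_DGRAM the sk->sk_state = TCP_CLOSE store moves inside the unix_state_lock() section instead of after the unlock. A hedged sketch of the user-visible contract:

/* Sending on a SOCK_SEQPACKET socket whose peer has been released
 * fails with EPIPE (MSG_NOSIGNAL suppresses SIGPIPE). */
#include <assert.h>
#include <errno.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int sk[2];

	assert(socketpair(AF_UNIX, SOCK_SEQPACKET, 0, sk) == 0);
	close(sk[1]);	/* peer goes through unix_release_sock() */
	assert(send(sk[0], "x", 1, MSG_NOSIGNAL) == -1);
	assert(errno == EPIPE);
	return 0;
}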
@@ -2506,32 +2544,18 @@ static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
static int unix_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
{
- int copied = 0;
-
- while (1) {
- struct unix_sock *u = unix_sk(sk);
- struct sk_buff *skb;
- int used, err;
-
- mutex_lock(&u->iolock);
- skb = skb_recv_datagram(sk, MSG_DONTWAIT, &err);
- mutex_unlock(&u->iolock);
- if (!skb)
- return err;
+ struct unix_sock *u = unix_sk(sk);
+ struct sk_buff *skb;
+ int err, copied;
- used = recv_actor(sk, skb);
- if (used <= 0) {
- if (!copied)
- copied = used;
- kfree_skb(skb);
- break;
- } else if (used <= skb->len) {
- copied += used;
- }
+ mutex_lock(&u->iolock);
+ skb = skb_recv_datagram(sk, MSG_DONTWAIT, &err);
+ mutex_unlock(&u->iolock);
+ if (!skb)
+ return err;
- kfree_skb(skb);
- break;
- }
+ copied = recv_actor(sk, skb);
+ kfree_skb(skb);
return copied;
}
@@ -2543,13 +2567,14 @@ static long unix_stream_data_wait(struct sock *sk, long timeo,
struct sk_buff *last, unsigned int last_len,
bool freezable)
{
+ unsigned int state = TASK_INTERRUPTIBLE | freezable * TASK_FREEZABLE;
struct sk_buff *tail;
DEFINE_WAIT(wait);
unix_state_lock(sk);
for (;;) {
- prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk_sleep(sk), &wait, state);
tail = skb_peek_tail(&sk->sk_receive_queue);
if (tail != last ||
@@ -2562,10 +2587,7 @@ static long unix_stream_data_wait(struct sock *sk, long timeo,
sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
unix_state_unlock(sk);
- if (freezable)
- timeo = freezable_schedule_timeout(timeo);
- else
- timeo = schedule_timeout(timeo);
+ timeo = schedule_timeout(timeo);
unix_state_lock(sk);
if (sock_flag(sk, SOCK_DEAD))
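
The hunk above replaces the freezable_schedule_timeout()/schedule_timeout() branch: the waiter now declares itself freezable through the TASK_FREEZABLE state bit handed to prepare_to_wait(), and the expression freezable * TASK_FREEZABLE folds the bool into the mask branch-free. A standalone illustration of that idiom (the flag values here are illustrative, not the kernel's, which live in include/linux/sched.h):

#include <assert.h>
#include <stdbool.h>

#define TASK_INTERRUPTIBLE 0x0001
#define TASK_FREEZABLE     0x2000	/* illustrative value only */

static unsigned int wait_state(bool freezable)
{
	/* freezable * TASK_FREEZABLE == (freezable ? TASK_FREEZABLE : 0) */
	return TASK_INTERRUPTIBLE | freezable * TASK_FREEZABLE;
}

int main(void)
{
	assert(wait_state(false) == TASK_INTERRUPTIBLE);
	assert(wait_state(true) == (TASK_INTERRUPTIBLE | TASK_FREEZABLE));
	return 0;
}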
@@ -3723,6 +3745,7 @@ static int __init af_unix_init(void)
rc = proto_register(&unix_stream_proto, 1);
if (rc != 0) {
pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__);
+ proto_unregister(&unix_dgram_proto);
goto out;
}
diff --git a/net/unix/diag.c b/net/unix/diag.c
index 105f522a89fe..616b55c5b890 100644
--- a/net/unix/diag.c
+++ b/net/unix/diag.c
@@ -114,14 +114,16 @@ static int sk_diag_show_rqlen(struct sock *sk, struct sk_buff *nlskb)
return nla_put(nlskb, UNIX_DIAG_RQLEN, sizeof(rql), &rql);
}
-static int sk_diag_dump_uid(struct sock *sk, struct sk_buff *nlskb)
+static int sk_diag_dump_uid(struct sock *sk, struct sk_buff *nlskb,
+ struct user_namespace *user_ns)
{
- uid_t uid = from_kuid_munged(sk_user_ns(nlskb->sk), sock_i_uid(sk));
+ uid_t uid = from_kuid_munged(user_ns, sock_i_uid(sk));
return nla_put(nlskb, UNIX_DIAG_UID, sizeof(uid_t), &uid);
}
static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
- u32 portid, u32 seq, u32 flags, int sk_ino)
+ struct user_namespace *user_ns,
+ u32 portid, u32 seq, u32 flags, int sk_ino)
{
struct nlmsghdr *nlh;
struct unix_diag_msg *rep;
@@ -167,7 +169,7 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
goto out_nlmsg_trim;
if ((req->udiag_show & UDIAG_SHOW_UID) &&
- sk_diag_dump_uid(sk, skb))
+ sk_diag_dump_uid(sk, skb, user_ns))
goto out_nlmsg_trim;
nlmsg_end(skb, nlh);
@@ -179,7 +181,8 @@ out_nlmsg_trim:
}
static int sk_diag_dump(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
- u32 portid, u32 seq, u32 flags)
+ struct user_namespace *user_ns,
+ u32 portid, u32 seq, u32 flags)
{
int sk_ino;
@@ -190,7 +193,7 @@ static int sk_diag_dump(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
if (!sk_ino)
return 0;
- return sk_diag_fill(sk, skb, req, portid, seq, flags, sk_ino);
+ return sk_diag_fill(sk, skb, req, user_ns, portid, seq, flags, sk_ino);
}
static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
@@ -214,7 +217,7 @@ static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
goto next;
if (!(req->udiag_states & (1 << sk->sk_state)))
goto next;
- if (sk_diag_dump(sk, skb, req,
+ if (sk_diag_dump(sk, skb, req, sk_user_ns(skb->sk),
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
NLM_F_MULTI) < 0) {
@@ -282,7 +285,8 @@ again:
if (!rep)
goto out;
- err = sk_diag_fill(sk, rep, req, NETLINK_CB(in_skb).portid,
+ err = sk_diag_fill(sk, rep, req, sk_user_ns(NETLINK_CB(in_skb).sk),
+ NETLINK_CB(in_skb).portid,
nlh->nlmsg_seq, 0, req->udiag_ino);
if (err < 0) {
nlmsg_free(rep);
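
The diag.c changes thread the requesting netlink socket's user namespace down to sk_diag_dump_uid() instead of deriving it from nlskb->sk, the socket of the reply skb being built (which, presumably, is not a safe source in the dump path). A hedged userspace sketch of issuing the kind of request that exercises UDIAG_SHOW_UID; reply parsing is omitted, the caller recv()s netlink messages from the returned fd:

#include <linux/netlink.h>
#include <linux/sock_diag.h>
#include <linux/unix_diag.h>
#include <sys/socket.h>
#include <unistd.h>

/* Dump all AF_UNIX sockets, asking for the UNIX_DIAG_UID attribute. */
static int request_unix_uids(void)
{
	struct {
		struct nlmsghdr nlh;
		struct unix_diag_req req;
	} msg = {
		.nlh = {
			.nlmsg_len = sizeof(msg),
			.nlmsg_type = SOCK_DIAG_BY_FAMILY,
			.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
		},
		.req = {
			.sdiag_family = AF_UNIX,
			.udiag_states = -1,	/* all socket states */
			.udiag_show = UDIAG_SHOW_UID,
		},
	};
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_SOCK_DIAG);

	if (fd < 0 || send(fd, &msg, sizeof(msg), 0) < 0) {
		if (fd >= 0)
			close(fd);
		return -1;
	}
	return fd;
}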
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index d45d5366115a..dc2763540393 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -204,6 +204,7 @@ void wait_for_unix_gc(void)
/* The external entry point: unix_gc() */
void unix_gc(void)
{
+ struct sk_buff *next_skb, *skb;
struct unix_sock *u;
struct unix_sock *next;
struct sk_buff_head hitlist;
@@ -297,11 +298,30 @@ void unix_gc(void)
spin_unlock(&unix_gc_lock);
+ /* We need io_uring to clean its registered files, ignore all io_uring
+ * originated skbs. It's fine as io_uring doesn't keep references to
+ * other io_uring instances and so killing all other files in the cycle
+ * will put all io_uring references forcing it to go through normal
+ * release path, eventually putting registered files.
+ */
+ skb_queue_walk_safe(&hitlist, skb, next_skb) {
+ if (skb->scm_io_uring) {
+ __skb_unlink(skb, &hitlist);
+ skb_queue_tail(&skb->sk->sk_receive_queue, skb);
+ }
+ }
+
/* Here we are. Hitlist is filled. Die. */
__skb_queue_purge(&hitlist);
spin_lock(&unix_gc_lock);
+ /* There could be io_uring registered files; just push them back to
+ * the inflight list.
+ */
+ list_for_each_entry_safe(u, next, &gc_candidates, link)
+ list_move_tail(&u->link, &gc_inflight_list);
+
/* All candidates should have been detached by now. */
BUG_ON(!list_empty(&gc_candidates));
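
Per the comments above, the GC now strips io_uring-originated skbs from the hitlist and requeues them on their owning sockets before purging, then moves the surviving candidates back onto gc_inflight_list so io_uring's own release path can drop the references. The skb_queue_walk_safe() form matters because the body unlinks the node it is visiting; here is a self-contained toy of that unlink-during-walk pattern (plain structs, not the kernel's sk_buff lists):

#include <assert.h>
#include <stddef.h>

struct node {
	struct node *next;
	int scm_io_uring;
};

/* Move tagged nodes off *hitlist onto *keep, caching ->next before the
 * body runs, exactly because the body may unlink the current node. */
static void split_hitlist(struct node **hitlist, struct node **keep)
{
	struct node **link = hitlist;
	struct node *pos, *next;

	for (pos = *hitlist; pos; pos = next) {
		next = pos->next;
		if (pos->scm_io_uring) {
			*link = next;		/* unlink from the hitlist */
			pos->next = *keep;	/* push onto the keep list */
			*keep = pos;
		} else {
			link = &pos->next;
		}
	}
}

int main(void)
{
	struct node c = { NULL, 1 }, b = { &c, 0 }, a = { &b, 1 };
	struct node *hitlist = &a, *keep = NULL;

	split_hitlist(&hitlist, &keep);
	assert(hitlist == &b && b.next == NULL);
	assert(keep == &c && c.next == &a && a.next == NULL);
	return 0;
}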
diff --git a/net/unix/unix_bpf.c b/net/unix/unix_bpf.c
index 7cf14c6b1725..e9bf15513961 100644
--- a/net/unix/unix_bpf.c
+++ b/net/unix/unix_bpf.c
@@ -145,12 +145,12 @@ int unix_dgram_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore)
if (restore) {
sk->sk_write_space = psock->saved_write_space;
- WRITE_ONCE(sk->sk_prot, psock->sk_proto);
+ sock_replace_proto(sk, psock->sk_proto);
return 0;
}
unix_dgram_bpf_check_needs_rebuild(psock->sk_proto);
- WRITE_ONCE(sk->sk_prot, &unix_dgram_bpf_prot);
+ sock_replace_proto(sk, &unix_dgram_bpf_prot);
return 0;
}
@@ -158,12 +158,12 @@ int unix_stream_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore)
{
if (restore) {
sk->sk_write_space = psock->saved_write_space;
- WRITE_ONCE(sk->sk_prot, psock->sk_proto);
+ sock_replace_proto(sk, psock->sk_proto);
return 0;
}
unix_stream_bpf_check_needs_rebuild(psock->sk_proto);
- WRITE_ONCE(sk->sk_prot, &unix_stream_bpf_prot);
+ sock_replace_proto(sk, &unix_stream_bpf_prot);
return 0;
}
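
Both update_proto hooks now go through sock_replace_proto() rather than open-coding WRITE_ONCE(sk->sk_prot, ...); presumably the helper centralizes a single tearing-free store so lockless readers of sk->sk_prot never observe a half-written pointer. A userspace sketch of that publication idiom using C11 atomics (the names are illustrative, not kernel API):

#include <stdatomic.h>
#include <stddef.h>

struct fake_proto {
	const char *name;
};

static _Atomic(struct fake_proto *) sk_prot;

/* Publish the new ops table with one atomic store ... */
static void replace_proto(struct fake_proto *p)
{
	atomic_store_explicit(&sk_prot, p, memory_order_release);
}

/* ... so a concurrent reader always loads a whole, valid pointer. */
static struct fake_proto *read_proto(void)
{
	return atomic_load_explicit(&sk_prot, memory_order_acquire);
}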