Diffstat (limited to 'net')
34 files changed, 1757 insertions, 969 deletions
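Two independent sets of changes follow: net/9p gains a new virtio-based transport (trans_virtio.c and its Kconfig/Makefile hooks), and the Bluetooth HCI layer is converted from passing separate OGF/OCF arguments to hci_send_cmd() to passing a single pre-packed HCI_OP_* opcode. As background for the Bluetooth hunks, here is a minimal stand-alone sketch of that packing relationship. It is an illustration, not part of the patch: the packing convention mirrors the hci_opcode_pack() helper that the diff stops using in hci_core.c, and the concrete OGF/OCF/HCI_OP_* values are assumptions taken from the Bluetooth specification and upstream hci.h rather than from this diff.

    /* Illustration only -- not part of the patch.  The new HCI_OP_* constants
     * are simply the old (OGF, OCF) pair packed into one 16-bit opcode:
     * low 10 bits = OCF, high 6 bits = OGF. */
    #include <assert.h>
    #include <stdio.h>

    /* Re-stated here; assumed to match the upstream hci_opcode_pack() macro. */
    #define hci_opcode_pack(ogf, ocf) \
            ((unsigned short)(((ocf) & 0x03ff) | ((ogf) << 10)))

    #define OGF_LINK_CTL        0x01      /* assumed value, per Bluetooth spec */
    #define OCF_CREATE_CONN     0x0005    /* assumed value, per Bluetooth spec */
    #define HCI_OP_CREATE_CONN  0x0405    /* assumed value from upstream hci.h */

    int main(void)
    {
            /* hci_send_cmd(hdev, HCI_OP_CREATE_CONN, ...) carries the same
             * wire opcode that the old hci_send_cmd(hdev, OGF_LINK_CTL,
             * OCF_CREATE_CONN, ...) built at send time. */
            assert(hci_opcode_pack(OGF_LINK_CTL, OCF_CREATE_CONN) == HCI_OP_CREATE_CONN);
            printf("opcode 0x%04x\n",
                   (unsigned)hci_opcode_pack(OGF_LINK_CTL, OCF_CREATE_CONN));
            return 0;
    }

With the opcode pre-packed in this way, hci_sent_cmd_data() can match the previously sent command by comparing hdr->opcode against cpu_to_le16(opcode) directly, as the hci_core.c hunk below shows.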
diff --git a/net/9p/Kconfig b/net/9p/Kconfig index 71bc110aebf8..bafc50c9e6ff 100644 --- a/net/9p/Kconfig +++ b/net/9p/Kconfig @@ -23,6 +23,13 @@ config NET_9P_FD file descriptors. TCP/IP is the default transport for 9p, so if you are going to use 9p, you'll likely want this. +config NET_9P_VIRTIO + depends on NET_9P && EXPERIMENTAL && VIRTIO + tristate "9P Virtio Transport (Experimental)" + help + This builds support for a transports between + guest partitions and a host partition. + config NET_9P_DEBUG bool "Debug information" depends on NET_9P diff --git a/net/9p/Makefile b/net/9p/Makefile index 5059bc06f8f3..d3abb246ccab 100644 --- a/net/9p/Makefile +++ b/net/9p/Makefile @@ -1,5 +1,6 @@ obj-$(CONFIG_NET_9P) := 9pnet.o obj-$(CONFIG_NET_9P_FD) += 9pnet_fd.o +obj-$(CONFIG_NET_9P_VIRTIO) += 9pnet_virtio.o 9pnet-objs := \ mod.o \ @@ -12,3 +13,6 @@ obj-$(CONFIG_NET_9P_FD) += 9pnet_fd.o 9pnet_fd-objs := \ trans_fd.o \ + +9pnet_virtio-objs := \ + trans_virtio.o \ diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c new file mode 100644 index 000000000000..40b71a29fc3f --- /dev/null +++ b/net/9p/trans_virtio.c @@ -0,0 +1,353 @@ +/* + * The Guest 9p transport driver + * + * This is a trivial pipe-based transport driver based on the lguest console + * code: we use lguest's DMA mechanism to send bytes out, and register a + * DMA buffer to receive bytes in. It is assumed to be present and available + * from the very beginning of boot. + * + * This may be have been done by just instaniating another HVC console, + * but HVC's blocksize of 16 bytes is annoying and painful to performance. + * + * A more efficient transport could be built based on the virtio block driver + * but it requires some changes in the 9p transport model (which are in + * progress) + * + */ +/* + * Copyright (C) 2007 Eric Van Hensbergen, IBM Corporation + * + * Based on virtio console driver + * Copyright (C) 2006, 2007 Rusty Russell, IBM Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to: + * Free Software Foundation + * 51 Franklin Street, Fifth Floor + * Boston, MA 02111-1301 USA + * + */ + +#include <linux/in.h> +#include <linux/module.h> +#include <linux/net.h> +#include <linux/ipv6.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/un.h> +#include <linux/uaccess.h> +#include <linux/inet.h> +#include <linux/idr.h> +#include <linux/file.h> +#include <net/9p/9p.h> +#include <linux/parser.h> +#include <net/9p/transport.h> +#include <linux/scatterlist.h> +#include <linux/virtio.h> +#include <linux/virtio_9p.h> + +/* a single mutex to manage channel initialization and attachment */ +static DECLARE_MUTEX(virtio_9p_lock); +/* global which tracks highest initialized channel */ +static int chan_index; + +/* We keep all per-channel information in a structure. + * This structure is allocated within the devices dev->mem space. + * A pointer to the structure will get put in the transport private. 
+ */ +static struct virtio_chan { + bool initialized; /* channel is initialized */ + bool inuse; /* channel is in use */ + + struct virtqueue *in_vq, *out_vq; + struct virtio_device *vdev; + + /* This is our input buffer, and how much data is left in it. */ + unsigned int in_len; + char *in, *inbuf; + + wait_queue_head_t wq; /* waitq for buffer */ +} channels[MAX_9P_CHAN]; + +/* How many bytes left in this page. */ +static unsigned int rest_of_page(void *data) +{ + return PAGE_SIZE - ((unsigned long)data % PAGE_SIZE); +} + +static int p9_virtio_write(struct p9_trans *trans, void *buf, int count) +{ + struct virtio_chan *chan = (struct virtio_chan *) trans->priv; + struct virtqueue *out_vq = chan->out_vq; + struct scatterlist sg[1]; + unsigned int len; + + P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio write (%d)\n", count); + + /* keep it simple - make sure we don't overflow a page */ + if (rest_of_page(buf) < count) + count = rest_of_page(buf); + + sg_init_one(sg, buf, count); + + /* add_buf wants a token to identify this buffer: we hand it any + * non-NULL pointer, since there's only ever one buffer. */ + if (out_vq->vq_ops->add_buf(out_vq, sg, 1, 0, (void *)1) == 0) { + /* Tell Host to go! */ + out_vq->vq_ops->kick(out_vq); + /* Chill out until it's done with the buffer. */ + while (!out_vq->vq_ops->get_buf(out_vq, &len)) + cpu_relax(); + } + + P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio wrote (%d)\n", count); + + /* We're expected to return the amount of data we wrote: all of it. */ + return count; +} + +/* Create a scatter-gather list representing our input buffer and put it in the + * queue. */ +static void add_inbuf(struct virtio_chan *chan) +{ + struct scatterlist sg[1]; + + sg_init_one(sg, chan->inbuf, PAGE_SIZE); + + /* We should always be able to add one buffer to an empty queue. */ + if (chan->in_vq->vq_ops->add_buf(chan->in_vq, sg, 0, 1, chan->inbuf)) + BUG(); + chan->in_vq->vq_ops->kick(chan->in_vq); +} + +static int p9_virtio_read(struct p9_trans *trans, void *buf, int count) +{ + struct virtio_chan *chan = (struct virtio_chan *) trans->priv; + struct virtqueue *in_vq = chan->in_vq; + + P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio read (%d)\n", count); + + /* If we don't have an input queue yet, we can't get input. */ + BUG_ON(!in_vq); + + /* No buffer? Try to get one. */ + if (!chan->in_len) { + chan->in = in_vq->vq_ops->get_buf(in_vq, &chan->in_len); + if (!chan->in) + return 0; + } + + /* You want more than we have to give? Well, try wanting less! */ + if (chan->in_len < count) + count = chan->in_len; + + /* Copy across to their buffer and increment offset. */ + memcpy(buf, chan->in, count); + chan->in += count; + chan->in_len -= count; + + /* Finished? Re-register buffer so Host will use it again. */ + if (chan->in_len == 0) + add_inbuf(chan); + + P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio finished read (%d)\n", + count); + + return count; +} + +/* The poll function is used by 9p transports to determine if there + * is there is activity available on a particular channel. In our case + * we use it to wait for a callback from the input routines. + */ +static unsigned int +p9_virtio_poll(struct p9_trans *trans, struct poll_table_struct *pt) +{ + struct virtio_chan *chan = (struct virtio_chan *)trans->priv; + struct virtqueue *in_vq = chan->in_vq; + int ret = POLLOUT; /* we can always handle more output */ + + poll_wait(NULL, &chan->wq, pt); + + /* No buffer? Try to get one. 
*/ + if (!chan->in_len) + chan->in = in_vq->vq_ops->get_buf(in_vq, &chan->in_len); + + if (chan->in_len) + ret |= POLLIN; + + return ret; +} + +static void p9_virtio_close(struct p9_trans *trans) +{ + struct virtio_chan *chan = trans->priv; + + down(&virtio_9p_lock); + chan->inuse = false; + up(&virtio_9p_lock); + + kfree(trans); +} + +static bool p9_virtio_intr(struct virtqueue *q) +{ + struct virtio_chan *chan = q->vdev->priv; + + P9_DPRINTK(P9_DEBUG_TRANS, "9p poll_wakeup: %p\n", &chan->wq); + wake_up_interruptible(&chan->wq); + + return true; +} + +static int p9_virtio_probe(struct virtio_device *dev) +{ + int err; + struct virtio_chan *chan; + int index; + + down(&virtio_9p_lock); + index = chan_index++; + chan = &channels[index]; + up(&virtio_9p_lock); + + if (chan_index > MAX_9P_CHAN) { + printk(KERN_ERR "9p: virtio: Maximum channels exceeded\n"); + BUG(); + } + + chan->vdev = dev; + + /* This is the scratch page we use to receive console input */ + chan->inbuf = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!chan->inbuf) { + err = -ENOMEM; + goto fail; + } + + /* Find the input queue. */ + dev->priv = chan; + chan->in_vq = dev->config->find_vq(dev, p9_virtio_intr); + if (IS_ERR(chan->in_vq)) { + err = PTR_ERR(chan->in_vq); + goto free; + } + + chan->out_vq = dev->config->find_vq(dev, NULL); + if (IS_ERR(chan->out_vq)) { + err = PTR_ERR(chan->out_vq); + goto free_in_vq; + } + + init_waitqueue_head(&chan->wq); + + /* Register the input buffer the first time. */ + add_inbuf(chan); + chan->inuse = false; + chan->initialized = true; + + return 0; + +free_in_vq: + dev->config->del_vq(chan->in_vq); +free: + kfree(chan->inbuf); +fail: + down(&virtio_9p_lock); + chan_index--; + up(&virtio_9p_lock); + return err; +} + +/* This sets up a transport channel for 9p communication. Right now + * we only match the first available channel, but eventually we couldlook up + * alternate channels by matching devname versus a virtio_config entry. + * We use a simple reference count mechanism to ensure that only a single + * mount has a channel open at a time. 
*/ +static struct p9_trans *p9_virtio_create(const char *devname, char *args) +{ + struct p9_trans *trans; + int index = 0; + struct virtio_chan *chan = channels; + + down(&virtio_9p_lock); + while (index < MAX_9P_CHAN) { + if (chan->initialized && !chan->inuse) { + chan->inuse = true; + break; + } else { + index++; + chan = &channels[index]; + } + } + up(&virtio_9p_lock); + + if (index >= MAX_9P_CHAN) { + printk(KERN_ERR "9p: virtio: couldn't find a free channel\n"); + return NULL; + } + + trans = kmalloc(sizeof(struct p9_trans), GFP_KERNEL); + if (!trans) { + printk(KERN_ERR "9p: couldn't allocate transport\n"); + return ERR_PTR(-ENOMEM); + } + + trans->write = p9_virtio_write; + trans->read = p9_virtio_read; + trans->close = p9_virtio_close; + trans->poll = p9_virtio_poll; + trans->priv = chan; + + return trans; +} + +#define VIRTIO_ID_9P 9 + +static struct virtio_device_id id_table[] = { + { VIRTIO_ID_9P, VIRTIO_DEV_ANY_ID }, + { 0 }, +}; + +/* The standard "struct lguest_driver": */ +static struct virtio_driver p9_virtio_drv = { + .driver.name = KBUILD_MODNAME, + .driver.owner = THIS_MODULE, + .id_table = id_table, + .probe = p9_virtio_probe, +}; + +static struct p9_trans_module p9_virtio_trans = { + .name = "virtio", + .create = p9_virtio_create, + .maxsize = PAGE_SIZE, + .def = 0, +}; + +/* The standard init function */ +static int __init p9_virtio_init(void) +{ + int count; + + for (count = 0; count < MAX_9P_CHAN; count++) + channels[count].initialized = false; + + v9fs_register_trans(&p9_virtio_trans); + return register_virtio_driver(&p9_virtio_drv); +} + +module_init(p9_virtio_init); + +MODULE_DEVICE_TABLE(virtio, id_table); +MODULE_AUTHOR("Eric Van Hensbergen <ericvh@gmail.com>"); +MODULE_DESCRIPTION("Virtio 9p Transport"); +MODULE_LICENSE("GPL"); diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index 5fdfc9a67d39..9483320f6dad 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -78,11 +78,11 @@ void hci_acl_connect(struct hci_conn *conn) cp.pkt_type = cpu_to_le16(hdev->pkt_type & ACL_PTYPE_MASK); if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER)) - cp.role_switch = 0x01; + cp.role_switch = 0x01; else - cp.role_switch = 0x00; + cp.role_switch = 0x00; - hci_send_cmd(hdev, OGF_LINK_CTL, OCF_CREATE_CONN, sizeof(cp), &cp); + hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp); } static void hci_acl_connect_cancel(struct hci_conn *conn) @@ -95,8 +95,7 @@ static void hci_acl_connect_cancel(struct hci_conn *conn) return; bacpy(&cp.bdaddr, &conn->dst); - hci_send_cmd(conn->hdev, OGF_LINK_CTL, - OCF_CREATE_CONN_CANCEL, sizeof(cp), &cp); + hci_send_cmd(conn->hdev, HCI_OP_CREATE_CONN_CANCEL, sizeof(cp), &cp); } void hci_acl_disconn(struct hci_conn *conn, __u8 reason) @@ -109,8 +108,7 @@ void hci_acl_disconn(struct hci_conn *conn, __u8 reason) cp.handle = cpu_to_le16(conn->handle); cp.reason = reason; - hci_send_cmd(conn->hdev, OGF_LINK_CTL, - OCF_DISCONNECT, sizeof(cp), &cp); + hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp); } void hci_add_sco(struct hci_conn *conn, __u16 handle) @@ -126,7 +124,29 @@ void hci_add_sco(struct hci_conn *conn, __u16 handle) cp.handle = cpu_to_le16(handle); cp.pkt_type = cpu_to_le16(hdev->pkt_type & SCO_PTYPE_MASK); - hci_send_cmd(hdev, OGF_LINK_CTL, OCF_ADD_SCO, sizeof(cp), &cp); + hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp); +} + +void hci_setup_sync(struct hci_conn *conn, __u16 handle) +{ + struct hci_dev *hdev = conn->hdev; + struct hci_cp_setup_sync_conn cp; + + BT_DBG("%p", 
conn); + + conn->state = BT_CONNECT; + conn->out = 1; + + cp.handle = cpu_to_le16(handle); + cp.pkt_type = cpu_to_le16(hdev->esco_type); + + cp.tx_bandwidth = cpu_to_le32(0x00001f40); + cp.rx_bandwidth = cpu_to_le32(0x00001f40); + cp.max_latency = cpu_to_le16(0xffff); + cp.voice_setting = cpu_to_le16(hdev->voice_setting); + cp.retrans_effort = 0xff; + + hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp); } static void hci_conn_timeout(unsigned long arg) @@ -143,7 +163,10 @@ static void hci_conn_timeout(unsigned long arg) switch (conn->state) { case BT_CONNECT: - hci_acl_connect_cancel(conn); + if (conn->type == ACL_LINK) + hci_acl_connect_cancel(conn); + else + hci_acl_disconn(conn, 0x13); break; case BT_CONNECTED: hci_acl_disconn(conn, 0x13); @@ -330,8 +353,12 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst) hci_conn_hold(sco); if (acl->state == BT_CONNECTED && - (sco->state == BT_OPEN || sco->state == BT_CLOSED)) - hci_add_sco(sco, acl->handle); + (sco->state == BT_OPEN || sco->state == BT_CLOSED)) { + if (lmp_esco_capable(hdev)) + hci_setup_sync(sco, acl->handle); + else + hci_add_sco(sco, acl->handle); + } return sco; } @@ -348,7 +375,7 @@ int hci_conn_auth(struct hci_conn *conn) if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) { struct hci_cp_auth_requested cp; cp.handle = cpu_to_le16(conn->handle); - hci_send_cmd(conn->hdev, OGF_LINK_CTL, OCF_AUTH_REQUESTED, sizeof(cp), &cp); + hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp); } return 0; } @@ -369,7 +396,7 @@ int hci_conn_encrypt(struct hci_conn *conn) struct hci_cp_set_conn_encrypt cp; cp.handle = cpu_to_le16(conn->handle); cp.encrypt = 1; - hci_send_cmd(conn->hdev, OGF_LINK_CTL, OCF_SET_CONN_ENCRYPT, sizeof(cp), &cp); + hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp), &cp); } return 0; } @@ -383,7 +410,7 @@ int hci_conn_change_link_key(struct hci_conn *conn) if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) { struct hci_cp_change_conn_link_key cp; cp.handle = cpu_to_le16(conn->handle); - hci_send_cmd(conn->hdev, OGF_LINK_CTL, OCF_CHANGE_CONN_LINK_KEY, sizeof(cp), &cp); + hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY, sizeof(cp), &cp); } return 0; } @@ -401,7 +428,7 @@ int hci_conn_switch_role(struct hci_conn *conn, uint8_t role) struct hci_cp_switch_role cp; bacpy(&cp.bdaddr, &conn->dst); cp.role = role; - hci_send_cmd(conn->hdev, OGF_LINK_POLICY, OCF_SWITCH_ROLE, sizeof(cp), &cp); + hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp); } return 0; } @@ -423,8 +450,7 @@ void hci_conn_enter_active_mode(struct hci_conn *conn) if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) { struct hci_cp_exit_sniff_mode cp; cp.handle = cpu_to_le16(conn->handle); - hci_send_cmd(hdev, OGF_LINK_POLICY, - OCF_EXIT_SNIFF_MODE, sizeof(cp), &cp); + hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp); } timer: @@ -455,8 +481,7 @@ void hci_conn_enter_sniff_mode(struct hci_conn *conn) cp.max_latency = cpu_to_le16(0); cp.min_remote_timeout = cpu_to_le16(0); cp.min_local_timeout = cpu_to_le16(0); - hci_send_cmd(hdev, OGF_LINK_POLICY, - OCF_SNIFF_SUBRATE, sizeof(cp), &cp); + hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp); } if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) { @@ -466,8 +491,7 @@ void hci_conn_enter_sniff_mode(struct hci_conn *conn) cp.min_interval = cpu_to_le16(hdev->sniff_min_interval); cp.attempt = cpu_to_le16(4); cp.timeout = cpu_to_le16(1); - hci_send_cmd(hdev, OGF_LINK_POLICY, - OCF_SNIFF_MODE, 
sizeof(cp), &cp); + hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp); } } @@ -493,6 +517,22 @@ void hci_conn_hash_flush(struct hci_dev *hdev) } } +/* Check pending connect attempts */ +void hci_conn_check_pending(struct hci_dev *hdev) +{ + struct hci_conn *conn; + + BT_DBG("hdev %s", hdev->name); + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2); + if (conn) + hci_acl_connect(conn); + + hci_dev_unlock(hdev); +} + int hci_get_conn_list(void __user *arg) { struct hci_conn_list_req req, *cl; diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 18e3afc964df..372b0d3b75a8 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -176,7 +176,7 @@ static void hci_reset_req(struct hci_dev *hdev, unsigned long opt) BT_DBG("%s %ld", hdev->name, opt); /* Reset device */ - hci_send_cmd(hdev, OGF_HOST_CTL, OCF_RESET, 0, NULL); + hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL); } static void hci_init_req(struct hci_dev *hdev, unsigned long opt) @@ -202,16 +202,16 @@ static void hci_init_req(struct hci_dev *hdev, unsigned long opt) /* Reset */ if (test_bit(HCI_QUIRK_RESET_ON_INIT, &hdev->quirks)) - hci_send_cmd(hdev, OGF_HOST_CTL, OCF_RESET, 0, NULL); + hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL); /* Read Local Supported Features */ - hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_LOCAL_FEATURES, 0, NULL); + hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL); /* Read Local Version */ - hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_LOCAL_VERSION, 0, NULL); + hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL); /* Read Buffer Size (ACL mtu, max pkt, etc.) */ - hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_BUFFER_SIZE, 0, NULL); + hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL); #if 0 /* Host buffer size */ @@ -221,29 +221,35 @@ static void hci_init_req(struct hci_dev *hdev, unsigned long opt) cp.sco_mtu = HCI_MAX_SCO_SIZE; cp.acl_max_pkt = cpu_to_le16(0xffff); cp.sco_max_pkt = cpu_to_le16(0xffff); - hci_send_cmd(hdev, OGF_HOST_CTL, OCF_HOST_BUFFER_SIZE, sizeof(cp), &cp); + hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp); } #endif /* Read BD Address */ - hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_BD_ADDR, 0, NULL); + hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL); + + /* Read Class of Device */ + hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL); + + /* Read Local Name */ + hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL); /* Read Voice Setting */ - hci_send_cmd(hdev, OGF_HOST_CTL, OCF_READ_VOICE_SETTING, 0, NULL); + hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL); /* Optional initialization */ /* Clear Event Filters */ flt_type = HCI_FLT_CLEAR_ALL; - hci_send_cmd(hdev, OGF_HOST_CTL, OCF_SET_EVENT_FLT, 1, &flt_type); + hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type); /* Page timeout ~20 secs */ param = cpu_to_le16(0x8000); - hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_PG_TIMEOUT, 2, ¶m); + hci_send_cmd(hdev, HCI_OP_WRITE_PG_TIMEOUT, 2, ¶m); /* Connection accept timeout ~20 secs */ param = cpu_to_le16(0x7d00); - hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_CA_TIMEOUT, 2, ¶m); + hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, ¶m); } static void hci_scan_req(struct hci_dev *hdev, unsigned long opt) @@ -253,7 +259,7 @@ static void hci_scan_req(struct hci_dev *hdev, unsigned long opt) BT_DBG("%s %x", hdev->name, scan); /* Inquiry and Page scans */ - hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_SCAN_ENABLE, 1, &scan); + hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); } static void 
hci_auth_req(struct hci_dev *hdev, unsigned long opt) @@ -263,7 +269,7 @@ static void hci_auth_req(struct hci_dev *hdev, unsigned long opt) BT_DBG("%s %x", hdev->name, auth); /* Authentication */ - hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_AUTH_ENABLE, 1, &auth); + hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth); } static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt) @@ -273,7 +279,7 @@ static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt) BT_DBG("%s %x", hdev->name, encrypt); /* Authentication */ - hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_ENCRYPT_MODE, 1, &encrypt); + hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt); } /* Get HCI device by index. @@ -384,7 +390,7 @@ static void hci_inq_req(struct hci_dev *hdev, unsigned long opt) memcpy(&cp.lap, &ir->lap, 3); cp.length = ir->length; cp.num_rsp = ir->num_rsp; - hci_send_cmd(hdev, OGF_LINK_CTL, OCF_INQUIRY, sizeof(cp), &cp); + hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp); } int hci_inquiry(void __user *arg) @@ -1111,13 +1117,13 @@ static int hci_send_frame(struct sk_buff *skb) } /* Send HCI command */ -int hci_send_cmd(struct hci_dev *hdev, __u16 ogf, __u16 ocf, __u32 plen, void *param) +int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param) { int len = HCI_COMMAND_HDR_SIZE + plen; struct hci_command_hdr *hdr; struct sk_buff *skb; - BT_DBG("%s ogf 0x%x ocf 0x%x plen %d", hdev->name, ogf, ocf, plen); + BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen); skb = bt_skb_alloc(len, GFP_ATOMIC); if (!skb) { @@ -1126,7 +1132,7 @@ int hci_send_cmd(struct hci_dev *hdev, __u16 ogf, __u16 ocf, __u32 plen, void *p } hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE); - hdr->opcode = cpu_to_le16(hci_opcode_pack(ogf, ocf)); + hdr->opcode = cpu_to_le16(opcode); hdr->plen = plen; if (plen) @@ -1143,7 +1149,7 @@ int hci_send_cmd(struct hci_dev *hdev, __u16 ogf, __u16 ocf, __u32 plen, void *p } /* Get data from the previously sent command */ -void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 ogf, __u16 ocf) +void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode) { struct hci_command_hdr *hdr; @@ -1152,10 +1158,10 @@ void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 ogf, __u16 ocf) hdr = (void *) hdev->sent_cmd->data; - if (hdr->opcode != cpu_to_le16(hci_opcode_pack(ogf, ocf))) + if (hdr->opcode != cpu_to_le16(opcode)) return NULL; - BT_DBG("%s ogf 0x%x ocf 0x%x", hdev->name, ogf, ocf); + BT_DBG("%s opcode 0x%x", hdev->name, opcode); return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE; } @@ -1355,6 +1361,26 @@ static inline void hci_sched_sco(struct hci_dev *hdev) } } +static inline void hci_sched_esco(struct hci_dev *hdev) +{ + struct hci_conn *conn; + struct sk_buff *skb; + int quote; + + BT_DBG("%s", hdev->name); + + while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, "e))) { + while (quote-- && (skb = skb_dequeue(&conn->data_q))) { + BT_DBG("skb %p len %d", skb, skb->len); + hci_send_frame(skb); + + conn->sent++; + if (conn->sent == ~0) + conn->sent = 0; + } + } +} + static void hci_tx_task(unsigned long arg) { struct hci_dev *hdev = (struct hci_dev *) arg; @@ -1370,6 +1396,8 @@ static void hci_tx_task(unsigned long arg) hci_sched_sco(hdev); + hci_sched_esco(hdev); + /* Send next queued raw (unknown type) packet */ while ((skb = skb_dequeue(&hdev->raw_q))) hci_send_frame(skb); diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 4baea1e38652..46df2e403df8 100644 --- a/net/bluetooth/hci_event.c +++ 
b/net/bluetooth/hci_event.c @@ -52,234 +52,273 @@ /* Handle HCI Event packets */ -/* Command Complete OGF LINK_CTL */ -static void hci_cc_link_ctl(struct hci_dev *hdev, __u16 ocf, struct sk_buff *skb) +static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb) { - __u8 status; - struct hci_conn *pend; + __u8 status = *((__u8 *) skb->data); - BT_DBG("%s ocf 0x%x", hdev->name, ocf); + BT_DBG("%s status 0x%x", hdev->name, status); - switch (ocf) { - case OCF_INQUIRY_CANCEL: - case OCF_EXIT_PERIODIC_INQ: - status = *((__u8 *) skb->data); + if (status) + return; - if (status) { - BT_DBG("%s Inquiry cancel error: status 0x%x", hdev->name, status); - } else { - clear_bit(HCI_INQUIRY, &hdev->flags); - hci_req_complete(hdev, status); - } + clear_bit(HCI_INQUIRY, &hdev->flags); - hci_dev_lock(hdev); + hci_req_complete(hdev, status); - pend = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2); - if (pend) - hci_acl_connect(pend); + hci_conn_check_pending(hdev); +} - hci_dev_unlock(hdev); +static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb) +{ + __u8 status = *((__u8 *) skb->data); - break; + BT_DBG("%s status 0x%x", hdev->name, status); - default: - BT_DBG("%s Command complete: ogf LINK_CTL ocf %x", hdev->name, ocf); - break; + if (status) + return; + + clear_bit(HCI_INQUIRY, &hdev->flags); + + hci_conn_check_pending(hdev); +} + +static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev, struct sk_buff *skb) +{ + BT_DBG("%s", hdev->name); +} + +static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_rp_role_discovery *rp = (void *) skb->data; + struct hci_conn *conn; + + BT_DBG("%s status 0x%x", hdev->name, rp->status); + + if (rp->status) + return; + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); + if (conn) { + if (rp->role) + conn->link_mode &= ~HCI_LM_MASTER; + else + conn->link_mode |= HCI_LM_MASTER; } + + hci_dev_unlock(hdev); } -/* Command Complete OGF LINK_POLICY */ -static void hci_cc_link_policy(struct hci_dev *hdev, __u16 ocf, struct sk_buff *skb) +static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb) { + struct hci_rp_write_link_policy *rp = (void *) skb->data; struct hci_conn *conn; - struct hci_rp_role_discovery *rd; - struct hci_rp_write_link_policy *lp; void *sent; - BT_DBG("%s ocf 0x%x", hdev->name, ocf); + BT_DBG("%s status 0x%x", hdev->name, rp->status); - switch (ocf) { - case OCF_ROLE_DISCOVERY: - rd = (void *) skb->data; + if (rp->status) + return; - if (rd->status) - break; + sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY); + if (!sent) + return; - hci_dev_lock(hdev); + hci_dev_lock(hdev); - conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rd->handle)); - if (conn) { - if (rd->role) - conn->link_mode &= ~HCI_LM_MASTER; - else - conn->link_mode |= HCI_LM_MASTER; - } + conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); + if (conn) { + __le16 policy = get_unaligned((__le16 *) (sent + 2)); + conn->link_policy = __le16_to_cpu(policy); + } - hci_dev_unlock(hdev); - break; + hci_dev_unlock(hdev); +} - case OCF_WRITE_LINK_POLICY: - sent = hci_sent_cmd_data(hdev, OGF_LINK_POLICY, OCF_WRITE_LINK_POLICY); - if (!sent) - break; +static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb) +{ + __u8 status = *((__u8 *) skb->data); - lp = (struct hci_rp_write_link_policy *) skb->data; + BT_DBG("%s status 0x%x", hdev->name, status); - if (lp->status) - break; + hci_req_complete(hdev, 
status); +} - hci_dev_lock(hdev); +static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb) +{ + __u8 status = *((__u8 *) skb->data); + void *sent; - conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(lp->handle)); - if (conn) { - __le16 policy = get_unaligned((__le16 *) (sent + 2)); - conn->link_policy = __le16_to_cpu(policy); - } + BT_DBG("%s status 0x%x", hdev->name, status); - hci_dev_unlock(hdev); - break; + sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME); + if (!sent) + return; - default: - BT_DBG("%s: Command complete: ogf LINK_POLICY ocf %x", - hdev->name, ocf); - break; + if (!status) + memcpy(hdev->dev_name, sent, 248); +} + +static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_rp_read_local_name *rp = (void *) skb->data; + + BT_DBG("%s status 0x%x", hdev->name, rp->status); + + if (rp->status) + return; + + memcpy(hdev->dev_name, rp->name, 248); +} + +static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb) +{ + __u8 status = *((__u8 *) skb->data); + void *sent; + + BT_DBG("%s status 0x%x", hdev->name, status); + + sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE); + if (!sent) + return; + + if (!status) { + __u8 param = *((__u8 *) sent); + + if (param == AUTH_ENABLED) + set_bit(HCI_AUTH, &hdev->flags); + else + clear_bit(HCI_AUTH, &hdev->flags); } + + hci_req_complete(hdev, status); } -/* Command Complete OGF HOST_CTL */ -static void hci_cc_host_ctl(struct hci_dev *hdev, __u16 ocf, struct sk_buff *skb) +static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb) { - __u8 status, param; - __u16 setting; - struct hci_rp_read_voice_setting *vs; + __u8 status = *((__u8 *) skb->data); void *sent; - BT_DBG("%s ocf 0x%x", hdev->name, ocf); + BT_DBG("%s status 0x%x", hdev->name, status); - switch (ocf) { - case OCF_RESET: - status = *((__u8 *) skb->data); - hci_req_complete(hdev, status); - break; + sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE); + if (!sent) + return; - case OCF_SET_EVENT_FLT: - status = *((__u8 *) skb->data); - if (status) { - BT_DBG("%s SET_EVENT_FLT failed %d", hdev->name, status); - } else { - BT_DBG("%s SET_EVENT_FLT succeseful", hdev->name); - } - break; + if (!status) { + __u8 param = *((__u8 *) sent); - case OCF_WRITE_AUTH_ENABLE: - sent = hci_sent_cmd_data(hdev, OGF_HOST_CTL, OCF_WRITE_AUTH_ENABLE); - if (!sent) - break; + if (param) + set_bit(HCI_ENCRYPT, &hdev->flags); + else + clear_bit(HCI_ENCRYPT, &hdev->flags); + } - status = *((__u8 *) skb->data); - param = *((__u8 *) sent); + hci_req_complete(hdev, status); +} - if (!status) { - if (param == AUTH_ENABLED) - set_bit(HCI_AUTH, &hdev->flags); - else - clear_bit(HCI_AUTH, &hdev->flags); - } - hci_req_complete(hdev, status); - break; +static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb) +{ + __u8 status = *((__u8 *) skb->data); + void *sent; - case OCF_WRITE_ENCRYPT_MODE: - sent = hci_sent_cmd_data(hdev, OGF_HOST_CTL, OCF_WRITE_ENCRYPT_MODE); - if (!sent) - break; + BT_DBG("%s status 0x%x", hdev->name, status); - status = *((__u8 *) skb->data); - param = *((__u8 *) sent); + sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE); + if (!sent) + return; - if (!status) { - if (param) - set_bit(HCI_ENCRYPT, &hdev->flags); - else - clear_bit(HCI_ENCRYPT, &hdev->flags); - } - hci_req_complete(hdev, status); - break; + if (!status) { + __u8 param = *((__u8 *) sent); - case OCF_WRITE_CA_TIMEOUT: - status = *((__u8 *) skb->data); - if (status) { - 
BT_DBG("%s OCF_WRITE_CA_TIMEOUT failed %d", hdev->name, status); - } else { - BT_DBG("%s OCF_WRITE_CA_TIMEOUT succeseful", hdev->name); - } - break; + clear_bit(HCI_PSCAN, &hdev->flags); + clear_bit(HCI_ISCAN, &hdev->flags); - case OCF_WRITE_PG_TIMEOUT: - status = *((__u8 *) skb->data); - if (status) { - BT_DBG("%s OCF_WRITE_PG_TIMEOUT failed %d", hdev->name, status); - } else { - BT_DBG("%s: OCF_WRITE_PG_TIMEOUT succeseful", hdev->name); - } - break; + if (param & SCAN_INQUIRY) + set_bit(HCI_ISCAN, &hdev->flags); - case OCF_WRITE_SCAN_ENABLE: - sent = hci_sent_cmd_data(hdev, OGF_HOST_CTL, OCF_WRITE_SCAN_ENABLE); - if (!sent) - break; + if (param & SCAN_PAGE) + set_bit(HCI_PSCAN, &hdev->flags); + } - status = *((__u8 *) skb->data); - param = *((__u8 *) sent); + hci_req_complete(hdev, status); +} - BT_DBG("param 0x%x", param); +static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_rp_read_class_of_dev *rp = (void *) skb->data; - if (!status) { - clear_bit(HCI_PSCAN, &hdev->flags); - clear_bit(HCI_ISCAN, &hdev->flags); - if (param & SCAN_INQUIRY) - set_bit(HCI_ISCAN, &hdev->flags); + BT_DBG("%s status 0x%x", hdev->name, rp->status); - if (param & SCAN_PAGE) - set_bit(HCI_PSCAN, &hdev->flags); - } - hci_req_complete(hdev, status); - break; + if (rp->status) + return; - case OCF_READ_VOICE_SETTING: - vs = (struct hci_rp_read_voice_setting *) skb->data; + memcpy(hdev->dev_class, rp->dev_class, 3); - if (vs->status) { - BT_DBG("%s READ_VOICE_SETTING failed %d", hdev->name, vs->status); - break; - } + BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name, + hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]); +} - setting = __le16_to_cpu(vs->voice_setting); +static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb) +{ + __u8 status = *((__u8 *) skb->data); + void *sent; - if (hdev->voice_setting != setting ) { - hdev->voice_setting = setting; + BT_DBG("%s status 0x%x", hdev->name, status); - BT_DBG("%s: voice setting 0x%04x", hdev->name, setting); + sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV); + if (!sent) + return; - if (hdev->notify) { - tasklet_disable(&hdev->tx_task); - hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING); - tasklet_enable(&hdev->tx_task); - } - } - break; + if (!status) + memcpy(hdev->dev_class, sent, 3); +} - case OCF_WRITE_VOICE_SETTING: - sent = hci_sent_cmd_data(hdev, OGF_HOST_CTL, OCF_WRITE_VOICE_SETTING); - if (!sent) - break; +static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_rp_read_voice_setting *rp = (void *) skb->data; + __u16 setting; + + BT_DBG("%s status 0x%x", hdev->name, rp->status); + + if (rp->status) + return; + + setting = __le16_to_cpu(rp->voice_setting); + + if (hdev->voice_setting == setting ) + return; + + hdev->voice_setting = setting; - status = *((__u8 *) skb->data); - setting = __le16_to_cpu(get_unaligned((__le16 *) sent)); + BT_DBG("%s voice setting 0x%04x", hdev->name, setting); - if (!status && hdev->voice_setting != setting) { + if (hdev->notify) { + tasklet_disable(&hdev->tx_task); + hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING); + tasklet_enable(&hdev->tx_task); + } +} + +static void hci_cc_write_voice_setting(struct hci_dev *hdev, struct sk_buff *skb) +{ + __u8 status = *((__u8 *) skb->data); + void *sent; + + BT_DBG("%s status 0x%x", hdev->name, status); + + sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING); + if (!sent) + return; + + if (!status) { + __u16 setting = __le16_to_cpu(get_unaligned((__le16 *) sent)); + + 
if (hdev->voice_setting != setting) { hdev->voice_setting = setting; - BT_DBG("%s: voice setting 0x%04x", hdev->name, setting); + BT_DBG("%s voice setting 0x%04x", hdev->name, setting); if (hdev->notify) { tasklet_disable(&hdev->tx_task); @@ -287,143 +326,153 @@ static void hci_cc_host_ctl(struct hci_dev *hdev, __u16 ocf, struct sk_buff *skb tasklet_enable(&hdev->tx_task); } } - hci_req_complete(hdev, status); - break; - - case OCF_HOST_BUFFER_SIZE: - status = *((__u8 *) skb->data); - if (status) { - BT_DBG("%s OCF_BUFFER_SIZE failed %d", hdev->name, status); - hci_req_complete(hdev, status); - } - break; - - default: - BT_DBG("%s Command complete: ogf HOST_CTL ocf %x", hdev->name, ocf); - break; } } -/* Command Complete OGF INFO_PARAM */ -static void hci_cc_info_param(struct hci_dev *hdev, __u16 ocf, struct sk_buff *skb) +static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb) { - struct hci_rp_read_loc_version *lv; - struct hci_rp_read_local_features *lf; - struct hci_rp_read_buffer_size *bs; - struct hci_rp_read_bd_addr *ba; + __u8 status = *((__u8 *) skb->data); - BT_DBG("%s ocf 0x%x", hdev->name, ocf); + BT_DBG("%s status 0x%x", hdev->name, status); - switch (ocf) { - case OCF_READ_LOCAL_VERSION: - lv = (struct hci_rp_read_loc_version *) skb->data; + hci_req_complete(hdev, status); +} - if (lv->status) { - BT_DBG("%s READ_LOCAL_VERSION failed %d", hdev->name, lf->status); - break; - } +static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_rp_read_local_version *rp = (void *) skb->data; - hdev->hci_ver = lv->hci_ver; - hdev->hci_rev = btohs(lv->hci_rev); - hdev->manufacturer = btohs(lv->manufacturer); + BT_DBG("%s status 0x%x", hdev->name, rp->status); - BT_DBG("%s: manufacturer %d hci_ver %d hci_rev %d", hdev->name, - hdev->manufacturer, hdev->hci_ver, hdev->hci_rev); + if (rp->status) + return; - break; + hdev->hci_ver = rp->hci_ver; + hdev->hci_rev = btohs(rp->hci_rev); + hdev->manufacturer = btohs(rp->manufacturer); - case OCF_READ_LOCAL_FEATURES: - lf = (struct hci_rp_read_local_features *) skb->data; + BT_DBG("%s manufacturer %d hci ver %d:%d", hdev->name, + hdev->manufacturer, + hdev->hci_ver, hdev->hci_rev); +} - if (lf->status) { - BT_DBG("%s READ_LOCAL_FEATURES failed %d", hdev->name, lf->status); - break; - } +static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_rp_read_local_commands *rp = (void *) skb->data; - memcpy(hdev->features, lf->features, sizeof(hdev->features)); + BT_DBG("%s status 0x%x", hdev->name, rp->status); - /* Adjust default settings according to features - * supported by device. 
*/ - if (hdev->features[0] & LMP_3SLOT) - hdev->pkt_type |= (HCI_DM3 | HCI_DH3); + if (rp->status) + return; - if (hdev->features[0] & LMP_5SLOT) - hdev->pkt_type |= (HCI_DM5 | HCI_DH5); + memcpy(hdev->commands, rp->commands, sizeof(hdev->commands)); +} - if (hdev->features[1] & LMP_HV2) { - hdev->pkt_type |= (HCI_HV2); - hdev->esco_type |= (ESCO_HV2); - } +static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_rp_read_local_features *rp = (void *) skb->data; - if (hdev->features[1] & LMP_HV3) { - hdev->pkt_type |= (HCI_HV3); - hdev->esco_type |= (ESCO_HV3); - } + BT_DBG("%s status 0x%x", hdev->name, rp->status); - if (hdev->features[3] & LMP_ESCO) - hdev->esco_type |= (ESCO_EV3); + if (rp->status) + return; - if (hdev->features[4] & LMP_EV4) - hdev->esco_type |= (ESCO_EV4); + memcpy(hdev->features, rp->features, 8); - if (hdev->features[4] & LMP_EV5) - hdev->esco_type |= (ESCO_EV5); + /* Adjust default settings according to features + * supported by device. */ - BT_DBG("%s: features 0x%x 0x%x 0x%x", hdev->name, - lf->features[0], lf->features[1], lf->features[2]); + if (hdev->features[0] & LMP_3SLOT) + hdev->pkt_type |= (HCI_DM3 | HCI_DH3); - break; + if (hdev->features[0] & LMP_5SLOT) + hdev->pkt_type |= (HCI_DM5 | HCI_DH5); - case OCF_READ_BUFFER_SIZE: - bs = (struct hci_rp_read_buffer_size *) skb->data; + if (hdev->features[1] & LMP_HV2) { + hdev->pkt_type |= (HCI_HV2); + hdev->esco_type |= (ESCO_HV2); + } - if (bs->status) { - BT_DBG("%s READ_BUFFER_SIZE failed %d", hdev->name, bs->status); - hci_req_complete(hdev, bs->status); - break; - } + if (hdev->features[1] & LMP_HV3) { + hdev->pkt_type |= (HCI_HV3); + hdev->esco_type |= (ESCO_HV3); + } - hdev->acl_mtu = __le16_to_cpu(bs->acl_mtu); - hdev->sco_mtu = bs->sco_mtu; - hdev->acl_pkts = __le16_to_cpu(bs->acl_max_pkt); - hdev->sco_pkts = __le16_to_cpu(bs->sco_max_pkt); + if (hdev->features[3] & LMP_ESCO) + hdev->esco_type |= (ESCO_EV3); - if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) { - hdev->sco_mtu = 64; - hdev->sco_pkts = 8; - } + if (hdev->features[4] & LMP_EV4) + hdev->esco_type |= (ESCO_EV4); - hdev->acl_cnt = hdev->acl_pkts; - hdev->sco_cnt = hdev->sco_pkts; + if (hdev->features[4] & LMP_EV5) + hdev->esco_type |= (ESCO_EV5); - BT_DBG("%s mtu: acl %d, sco %d max_pkt: acl %d, sco %d", hdev->name, - hdev->acl_mtu, hdev->sco_mtu, hdev->acl_pkts, hdev->sco_pkts); - break; + BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name, + hdev->features[0], hdev->features[1], + hdev->features[2], hdev->features[3], + hdev->features[4], hdev->features[5], + hdev->features[6], hdev->features[7]); +} - case OCF_READ_BD_ADDR: - ba = (struct hci_rp_read_bd_addr *) skb->data; +static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_rp_read_buffer_size *rp = (void *) skb->data; - if (!ba->status) { - bacpy(&hdev->bdaddr, &ba->bdaddr); - } else { - BT_DBG("%s: READ_BD_ADDR failed %d", hdev->name, ba->status); - } + BT_DBG("%s status 0x%x", hdev->name, rp->status); - hci_req_complete(hdev, ba->status); - break; + if (rp->status) + return; - default: - BT_DBG("%s Command complete: ogf INFO_PARAM ocf %x", hdev->name, ocf); - break; + hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu); + hdev->sco_mtu = rp->sco_mtu; + hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt); + hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt); + + if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) { + hdev->sco_mtu = 64; + hdev->sco_pkts = 8; } + + hdev->acl_cnt = 
hdev->acl_pkts; + hdev->sco_cnt = hdev->sco_pkts; + + BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, + hdev->acl_mtu, hdev->acl_pkts, + hdev->sco_mtu, hdev->sco_pkts); +} + +static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_rp_read_bd_addr *rp = (void *) skb->data; + + BT_DBG("%s status 0x%x", hdev->name, rp->status); + + if (!rp->status) + bacpy(&hdev->bdaddr, &rp->bdaddr); + + hci_req_complete(hdev, rp->status); +} + +static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status) +{ + BT_DBG("%s status 0x%x", hdev->name, status); + + if (status) { + hci_req_complete(hdev, status); + + hci_conn_check_pending(hdev); + } else + set_bit(HCI_INQUIRY, &hdev->flags); } -/* Command Status OGF LINK_CTL */ static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status) { + struct hci_cp_create_conn *cp; struct hci_conn *conn; - struct hci_cp_create_conn *cp = hci_sent_cmd_data(hdev, OGF_LINK_CTL, OCF_CREATE_CONN); + BT_DBG("%s status 0x%x", hdev->name, status); + + cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN); if (!cp) return; @@ -431,8 +480,7 @@ static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status) conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); - BT_DBG("%s status 0x%x bdaddr %s conn %p", hdev->name, - status, batostr(&cp->bdaddr), conn); + BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->bdaddr), conn); if (status) { if (conn && conn->state == BT_CONNECT) { @@ -457,234 +505,138 @@ static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status) hci_dev_unlock(hdev); } -static void hci_cs_link_ctl(struct hci_dev *hdev, __u16 ocf, __u8 status) +static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status) { - BT_DBG("%s ocf 0x%x", hdev->name, ocf); + struct hci_cp_add_sco *cp; + struct hci_conn *acl, *sco; + __u16 handle; - switch (ocf) { - case OCF_CREATE_CONN: - hci_cs_create_conn(hdev, status); - break; - - case OCF_ADD_SCO: - if (status) { - struct hci_conn *acl, *sco; - struct hci_cp_add_sco *cp = hci_sent_cmd_data(hdev, OGF_LINK_CTL, OCF_ADD_SCO); - __u16 handle; - - if (!cp) - break; + BT_DBG("%s status 0x%x", hdev->name, status); - handle = __le16_to_cpu(cp->handle); - - BT_DBG("%s Add SCO error: handle %d status 0x%x", hdev->name, handle, status); + if (!status) + return; - hci_dev_lock(hdev); + cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO); + if (!cp) + return; - acl = hci_conn_hash_lookup_handle(hdev, handle); - if (acl && (sco = acl->link)) { - sco->state = BT_CLOSED; + handle = __le16_to_cpu(cp->handle); - hci_proto_connect_cfm(sco, status); - hci_conn_del(sco); - } + BT_DBG("%s handle %d", hdev->name, handle); - hci_dev_unlock(hdev); - } - break; + hci_dev_lock(hdev); - case OCF_INQUIRY: - if (status) { - BT_DBG("%s Inquiry error: status 0x%x", hdev->name, status); - hci_req_complete(hdev, status); - } else { - set_bit(HCI_INQUIRY, &hdev->flags); - } - break; + acl = hci_conn_hash_lookup_handle(hdev, handle); + if (acl && (sco = acl->link)) { + sco->state = BT_CLOSED; - default: - BT_DBG("%s Command status: ogf LINK_CTL ocf %x status %d", - hdev->name, ocf, status); - break; + hci_proto_connect_cfm(sco, status); + hci_conn_del(sco); } + + hci_dev_unlock(hdev); } -/* Command Status OGF LINK_POLICY */ -static void hci_cs_link_policy(struct hci_dev *hdev, __u16 ocf, __u8 status) +static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status) { - BT_DBG("%s ocf 0x%x", hdev->name, ocf); - - switch (ocf) { - case OCF_SNIFF_MODE: - if (status) { - struct hci_conn *conn; - 
struct hci_cp_sniff_mode *cp = hci_sent_cmd_data(hdev, OGF_LINK_POLICY, OCF_SNIFF_MODE); + BT_DBG("%s status 0x%x", hdev->name, status); +} - if (!cp) - break; +static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status) +{ + struct hci_cp_setup_sync_conn *cp; + struct hci_conn *acl, *sco; + __u16 handle; - hci_dev_lock(hdev); + BT_DBG("%s status 0x%x", hdev->name, status); - conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); - if (conn) { - clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend); - } - - hci_dev_unlock(hdev); - } - break; + if (!status) + return; - case OCF_EXIT_SNIFF_MODE: - if (status) { - struct hci_conn *conn; - struct hci_cp_exit_sniff_mode *cp = hci_sent_cmd_data(hdev, OGF_LINK_POLICY, OCF_EXIT_SNIFF_MODE); + cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN); + if (!cp) + return; - if (!cp) - break; + handle = __le16_to_cpu(cp->handle); - hci_dev_lock(hdev); + BT_DBG("%s handle %d", hdev->name, handle); - conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); - if (conn) { - clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend); - } + hci_dev_lock(hdev); - hci_dev_unlock(hdev); - } - break; + acl = hci_conn_hash_lookup_handle(hdev, handle); + if (acl && (sco = acl->link)) { + sco->state = BT_CLOSED; - default: - BT_DBG("%s Command status: ogf LINK_POLICY ocf %x", hdev->name, ocf); - break; + hci_proto_connect_cfm(sco, status); + hci_conn_del(sco); } -} -/* Command Status OGF HOST_CTL */ -static void hci_cs_host_ctl(struct hci_dev *hdev, __u16 ocf, __u8 status) -{ - BT_DBG("%s ocf 0x%x", hdev->name, ocf); - - switch (ocf) { - default: - BT_DBG("%s Command status: ogf HOST_CTL ocf %x", hdev->name, ocf); - break; - } + hci_dev_unlock(hdev); } -/* Command Status OGF INFO_PARAM */ -static void hci_cs_info_param(struct hci_dev *hdev, __u16 ocf, __u8 status) +static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status) { - BT_DBG("%s: hci_cs_info_param: ocf 0x%x", hdev->name, ocf); - - switch (ocf) { - default: - BT_DBG("%s Command status: ogf INFO_PARAM ocf %x", hdev->name, ocf); - break; - } -} + struct hci_cp_sniff_mode *cp; + struct hci_conn *conn; -/* Inquiry Complete */ -static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) -{ - __u8 status = *((__u8 *) skb->data); - struct hci_conn *pend; + BT_DBG("%s status 0x%x", hdev->name, status); - BT_DBG("%s status %d", hdev->name, status); + if (!status) + return; - clear_bit(HCI_INQUIRY, &hdev->flags); - hci_req_complete(hdev, status); + cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE); + if (!cp) + return; hci_dev_lock(hdev); - pend = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2); - if (pend) - hci_acl_connect(pend); + conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); + if (conn) + clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend); hci_dev_unlock(hdev); } -/* Inquiry Result */ -static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb) +static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status) { - struct inquiry_data data; - struct inquiry_info *info = (struct inquiry_info *) (skb->data + 1); - int num_rsp = *((__u8 *) skb->data); + struct hci_cp_exit_sniff_mode *cp; + struct hci_conn *conn; - BT_DBG("%s num_rsp %d", hdev->name, num_rsp); + BT_DBG("%s status 0x%x", hdev->name, status); - if (!num_rsp) + if (!status) + return; + + cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE); + if (!cp) return; hci_dev_lock(hdev); - for (; num_rsp; num_rsp--) { - bacpy(&data.bdaddr, 
&info->bdaddr); - data.pscan_rep_mode = info->pscan_rep_mode; - data.pscan_period_mode = info->pscan_period_mode; - data.pscan_mode = info->pscan_mode; - memcpy(data.dev_class, info->dev_class, 3); - data.clock_offset = info->clock_offset; - data.rssi = 0x00; - info++; - hci_inquiry_cache_update(hdev, &data); - } + conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); + if (conn) + clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend); hci_dev_unlock(hdev); } -/* Inquiry Result With RSSI */ -static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct sk_buff *skb) +static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) { - struct inquiry_data data; - int num_rsp = *((__u8 *) skb->data); - - BT_DBG("%s num_rsp %d", hdev->name, num_rsp); - - if (!num_rsp) - return; - - hci_dev_lock(hdev); + __u8 status = *((__u8 *) skb->data); - if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) { - struct inquiry_info_with_rssi_and_pscan_mode *info = - (struct inquiry_info_with_rssi_and_pscan_mode *) (skb->data + 1); + BT_DBG("%s status %d", hdev->name, status); - for (; num_rsp; num_rsp--) { - bacpy(&data.bdaddr, &info->bdaddr); - data.pscan_rep_mode = info->pscan_rep_mode; - data.pscan_period_mode = info->pscan_period_mode; - data.pscan_mode = info->pscan_mode; - memcpy(data.dev_class, info->dev_class, 3); - data.clock_offset = info->clock_offset; - data.rssi = info->rssi; - info++; - hci_inquiry_cache_update(hdev, &data); - } - } else { - struct inquiry_info_with_rssi *info = - (struct inquiry_info_with_rssi *) (skb->data + 1); + clear_bit(HCI_INQUIRY, &hdev->flags); - for (; num_rsp; num_rsp--) { - bacpy(&data.bdaddr, &info->bdaddr); - data.pscan_rep_mode = info->pscan_rep_mode; - data.pscan_period_mode = info->pscan_period_mode; - data.pscan_mode = 0x00; - memcpy(data.dev_class, info->dev_class, 3); - data.clock_offset = info->clock_offset; - data.rssi = info->rssi; - info++; - hci_inquiry_cache_update(hdev, &data); - } - } + hci_req_complete(hdev, status); - hci_dev_unlock(hdev); + hci_conn_check_pending(hdev); } -/* Extended Inquiry Result */ -static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb) +static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct inquiry_data data; - struct extended_inquiry_info *info = (struct extended_inquiry_info *) (skb->data + 1); + struct inquiry_info *info = (void *) (skb->data + 1); int num_rsp = *((__u8 *) skb->data); BT_DBG("%s num_rsp %d", hdev->name, num_rsp); @@ -696,12 +648,12 @@ static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct for (; num_rsp; num_rsp--) { bacpy(&data.bdaddr, &info->bdaddr); - data.pscan_rep_mode = info->pscan_rep_mode; - data.pscan_period_mode = info->pscan_period_mode; - data.pscan_mode = 0x00; + data.pscan_rep_mode = info->pscan_rep_mode; + data.pscan_period_mode = info->pscan_period_mode; + data.pscan_mode = info->pscan_mode; memcpy(data.dev_class, info->dev_class, 3); - data.clock_offset = info->clock_offset; - data.rssi = info->rssi; + data.clock_offset = info->clock_offset; + data.rssi = 0x00; info++; hci_inquiry_cache_update(hdev, &data); } @@ -709,70 +661,18 @@ static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct hci_dev_unlock(hdev); } -/* Connect Request */ -static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb) -{ - struct hci_ev_conn_request *ev = (struct hci_ev_conn_request *) skb->data; - 
int mask = hdev->link_mode; - - BT_DBG("%s Connection request: %s type 0x%x", hdev->name, - batostr(&ev->bdaddr), ev->link_type); - - mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type); - - if (mask & HCI_LM_ACCEPT) { - /* Connection accepted */ - struct hci_conn *conn; - struct hci_cp_accept_conn_req cp; - - hci_dev_lock(hdev); - conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr); - if (!conn) { - if (!(conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr))) { - BT_ERR("No memmory for new connection"); - hci_dev_unlock(hdev); - return; - } - } - memcpy(conn->dev_class, ev->dev_class, 3); - conn->state = BT_CONNECT; - hci_dev_unlock(hdev); - - bacpy(&cp.bdaddr, &ev->bdaddr); - - if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER)) - cp.role = 0x00; /* Become master */ - else - cp.role = 0x01; /* Remain slave */ - - hci_send_cmd(hdev, OGF_LINK_CTL, - OCF_ACCEPT_CONN_REQ, sizeof(cp), &cp); - } else { - /* Connection rejected */ - struct hci_cp_reject_conn_req cp; - - bacpy(&cp.bdaddr, &ev->bdaddr); - cp.reason = 0x0f; - hci_send_cmd(hdev, OGF_LINK_CTL, - OCF_REJECT_CONN_REQ, sizeof(cp), &cp); - } -} - -/* Connect Complete */ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) { - struct hci_ev_conn_complete *ev = (struct hci_ev_conn_complete *) skb->data; - struct hci_conn *conn, *pend; + struct hci_ev_conn_complete *ev = (void *) skb->data; + struct hci_conn *conn; BT_DBG("%s", hdev->name); hci_dev_lock(hdev); conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr); - if (!conn) { - hci_dev_unlock(hdev); - return; - } + if (!conn) + goto unlock; if (!ev->status) { conn->handle = __le16_to_cpu(ev->handle); @@ -788,8 +688,7 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s if (conn->type == ACL_LINK) { struct hci_cp_read_remote_features cp; cp.handle = ev->handle; - hci_send_cmd(hdev, OGF_LINK_CTL, - OCF_READ_REMOTE_FEATURES, sizeof(cp), &cp); + hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES, sizeof(cp), &cp); } /* Set link policy */ @@ -797,8 +696,7 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s struct hci_cp_write_link_policy cp; cp.handle = ev->handle; cp.policy = cpu_to_le16(hdev->link_policy); - hci_send_cmd(hdev, OGF_LINK_POLICY, - OCF_WRITE_LINK_POLICY, sizeof(cp), &cp); + hci_send_cmd(hdev, HCI_OP_WRITE_LINK_POLICY, sizeof(cp), &cp); } /* Set packet type for incoming connection */ @@ -809,8 +707,7 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s cpu_to_le16(hdev->pkt_type & ACL_PTYPE_MASK): cpu_to_le16(hdev->pkt_type & SCO_PTYPE_MASK); - hci_send_cmd(hdev, OGF_LINK_CTL, - OCF_CHANGE_CONN_PTYPE, sizeof(cp), &cp); + hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp), &cp); } else { /* Update disconnect timer */ hci_conn_hold(conn); @@ -822,9 +719,12 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s if (conn->type == ACL_LINK) { struct hci_conn *sco = conn->link; if (sco) { - if (!ev->status) - hci_add_sco(sco, conn->handle); - else { + if (!ev->status) { + if (lmp_esco_capable(hdev)) + hci_setup_sync(sco, conn->handle); + else + hci_add_sco(sco, conn->handle); + } else { hci_proto_connect_cfm(sco, ev->status); hci_conn_del(sco); } @@ -835,136 +735,104 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s if (ev->status) hci_conn_del(conn); - pend = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2); - if (pend) - hci_acl_connect(pend); - 
+unlock: hci_dev_unlock(hdev); -} - -/* Disconnect Complete */ -static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) -{ - struct hci_ev_disconn_complete *ev = (struct hci_ev_disconn_complete *) skb->data; - struct hci_conn *conn; - - BT_DBG("%s status %d", hdev->name, ev->status); - - if (ev->status) - return; - hci_dev_lock(hdev); - - conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); - if (conn) { - conn->state = BT_CLOSED; - hci_proto_disconn_ind(conn, ev->reason); - hci_conn_del(conn); - } - - hci_dev_unlock(hdev); + hci_conn_check_pending(hdev); } -/* Number of completed packets */ -static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb) +static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb) { - struct hci_ev_num_comp_pkts *ev = (struct hci_ev_num_comp_pkts *) skb->data; - __le16 *ptr; - int i; - - skb_pull(skb, sizeof(*ev)); - - BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl); + struct hci_ev_conn_request *ev = (void *) skb->data; + int mask = hdev->link_mode; - if (skb->len < ev->num_hndl * 4) { - BT_DBG("%s bad parameters", hdev->name); - return; - } + BT_DBG("%s bdaddr %s type 0x%x", hdev->name, + batostr(&ev->bdaddr), ev->link_type); - tasklet_disable(&hdev->tx_task); + mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type); - for (i = 0, ptr = (__le16 *) skb->data; i < ev->num_hndl; i++) { + if (mask & HCI_LM_ACCEPT) { + /* Connection accepted */ struct hci_conn *conn; - __u16 handle, count; - - handle = __le16_to_cpu(get_unaligned(ptr++)); - count = __le16_to_cpu(get_unaligned(ptr++)); - conn = hci_conn_hash_lookup_handle(hdev, handle); - if (conn) { - conn->sent -= count; + hci_dev_lock(hdev); - if (conn->type == ACL_LINK) { - if ((hdev->acl_cnt += count) > hdev->acl_pkts) - hdev->acl_cnt = hdev->acl_pkts; - } else { - if ((hdev->sco_cnt += count) > hdev->sco_pkts) - hdev->sco_cnt = hdev->sco_pkts; + conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr); + if (!conn) { + if (!(conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr))) { + BT_ERR("No memmory for new connection"); + hci_dev_unlock(hdev); + return; } } - } - hci_sched_tx(hdev); - tasklet_enable(&hdev->tx_task); -} + memcpy(conn->dev_class, ev->dev_class, 3); + conn->state = BT_CONNECT; -/* Role Change */ -static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb) -{ - struct hci_ev_role_change *ev = (struct hci_ev_role_change *) skb->data; - struct hci_conn *conn; + hci_dev_unlock(hdev); - BT_DBG("%s status %d", hdev->name, ev->status); + if (ev->link_type == ACL_LINK || !lmp_esco_capable(hdev)) { + struct hci_cp_accept_conn_req cp; - hci_dev_lock(hdev); + bacpy(&cp.bdaddr, &ev->bdaddr); - conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); - if (conn) { - if (!ev->status) { - if (ev->role) - conn->link_mode &= ~HCI_LM_MASTER; + if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER)) + cp.role = 0x00; /* Become master */ else - conn->link_mode |= HCI_LM_MASTER; - } + cp.role = 0x01; /* Remain slave */ - clear_bit(HCI_CONN_RSWITCH_PEND, &conn->pend); + hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, + sizeof(cp), &cp); + } else { + struct hci_cp_accept_sync_conn_req cp; - hci_role_switch_cfm(conn, ev->status, ev->role); - } + bacpy(&cp.bdaddr, &ev->bdaddr); + cp.pkt_type = cpu_to_le16(hdev->esco_type); - hci_dev_unlock(hdev); + cp.tx_bandwidth = cpu_to_le32(0x00001f40); + cp.rx_bandwidth = cpu_to_le32(0x00001f40); + cp.max_latency = cpu_to_le16(0xffff); + 
cp.content_format = cpu_to_le16(hdev->voice_setting); + cp.retrans_effort = 0xff; + + hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, + sizeof(cp), &cp); + } + } else { + /* Connection rejected */ + struct hci_cp_reject_conn_req cp; + + bacpy(&cp.bdaddr, &ev->bdaddr); + cp.reason = 0x0f; + hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp); + } } -/* Mode Change */ -static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb) +static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) { - struct hci_ev_mode_change *ev = (struct hci_ev_mode_change *) skb->data; + struct hci_ev_disconn_complete *ev = (void *) skb->data; struct hci_conn *conn; BT_DBG("%s status %d", hdev->name, ev->status); + if (ev->status) + return; + hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); if (conn) { - conn->mode = ev->mode; - conn->interval = __le16_to_cpu(ev->interval); - - if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) { - if (conn->mode == HCI_CM_ACTIVE) - conn->power_save = 1; - else - conn->power_save = 0; - } + conn->state = BT_CLOSED; + hci_proto_disconn_ind(conn, ev->reason); + hci_conn_del(conn); } hci_dev_unlock(hdev); } -/* Authentication Complete */ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) { - struct hci_ev_auth_complete *ev = (struct hci_ev_auth_complete *) skb->data; + struct hci_ev_auth_complete *ev = (void *) skb->data; struct hci_conn *conn; BT_DBG("%s status %d", hdev->name, ev->status); @@ -985,8 +853,8 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s struct hci_cp_set_conn_encrypt cp; cp.handle = cpu_to_le16(conn->handle); cp.encrypt = 1; - hci_send_cmd(conn->hdev, OGF_LINK_CTL, - OCF_SET_CONN_ENCRYPT, sizeof(cp), &cp); + hci_send_cmd(conn->hdev, + HCI_OP_SET_CONN_ENCRYPT, sizeof(cp), &cp); } else { clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend); hci_encrypt_cfm(conn, ev->status, 0x00); @@ -997,10 +865,16 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s hci_dev_unlock(hdev); } -/* Encryption Change */ +static inline void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb) +{ + BT_DBG("%s", hdev->name); + + hci_conn_check_pending(hdev); +} + static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb) { - struct hci_ev_encrypt_change *ev = (struct hci_ev_encrypt_change *) skb->data; + struct hci_ev_encrypt_change *ev = (void *) skb->data; struct hci_conn *conn; BT_DBG("%s status %d", hdev->name, ev->status); @@ -1024,10 +898,9 @@ static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff * hci_dev_unlock(hdev); } -/* Change Connection Link Key Complete */ -static inline void hci_change_conn_link_key_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) +static inline void hci_change_link_key_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) { - struct hci_ev_change_conn_link_key_complete *ev = (struct hci_ev_change_conn_link_key_complete *) skb->data; + struct hci_ev_change_link_key_complete *ev = (void *) skb->data; struct hci_conn *conn; BT_DBG("%s status %d", hdev->name, ev->status); @@ -1047,25 +920,263 @@ static inline void hci_change_conn_link_key_complete_evt(struct hci_dev *hdev, s hci_dev_unlock(hdev); } -/* Pin Code Request*/ -static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb) +static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff *skb) 
{ + struct hci_ev_remote_features *ev = (void *) skb->data; + struct hci_conn *conn; + + BT_DBG("%s status %d", hdev->name, ev->status); + + if (ev->status) + return; + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); + if (conn) + memcpy(conn->features, ev->features, 8); + + hci_dev_unlock(hdev); } -/* Link Key Request */ -static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb) +static inline void hci_remote_version_evt(struct hci_dev *hdev, struct sk_buff *skb) { + BT_DBG("%s", hdev->name); } -/* Link Key Notification */ -static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb) +static inline void hci_qos_setup_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) { + BT_DBG("%s", hdev->name); } -/* Remote Features */ -static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff *skb) +static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) { - struct hci_ev_remote_features *ev = (struct hci_ev_remote_features *) skb->data; + struct hci_ev_cmd_complete *ev = (void *) skb->data; + __u16 opcode; + + skb_pull(skb, sizeof(*ev)); + + opcode = __le16_to_cpu(ev->opcode); + + switch (opcode) { + case HCI_OP_INQUIRY_CANCEL: + hci_cc_inquiry_cancel(hdev, skb); + break; + + case HCI_OP_EXIT_PERIODIC_INQ: + hci_cc_exit_periodic_inq(hdev, skb); + break; + + case HCI_OP_REMOTE_NAME_REQ_CANCEL: + hci_cc_remote_name_req_cancel(hdev, skb); + break; + + case HCI_OP_ROLE_DISCOVERY: + hci_cc_role_discovery(hdev, skb); + break; + + case HCI_OP_WRITE_LINK_POLICY: + hci_cc_write_link_policy(hdev, skb); + break; + + case HCI_OP_RESET: + hci_cc_reset(hdev, skb); + break; + + case HCI_OP_WRITE_LOCAL_NAME: + hci_cc_write_local_name(hdev, skb); + break; + + case HCI_OP_READ_LOCAL_NAME: + hci_cc_read_local_name(hdev, skb); + break; + + case HCI_OP_WRITE_AUTH_ENABLE: + hci_cc_write_auth_enable(hdev, skb); + break; + + case HCI_OP_WRITE_ENCRYPT_MODE: + hci_cc_write_encrypt_mode(hdev, skb); + break; + + case HCI_OP_WRITE_SCAN_ENABLE: + hci_cc_write_scan_enable(hdev, skb); + break; + + case HCI_OP_READ_CLASS_OF_DEV: + hci_cc_read_class_of_dev(hdev, skb); + break; + + case HCI_OP_WRITE_CLASS_OF_DEV: + hci_cc_write_class_of_dev(hdev, skb); + break; + + case HCI_OP_READ_VOICE_SETTING: + hci_cc_read_voice_setting(hdev, skb); + break; + + case HCI_OP_WRITE_VOICE_SETTING: + hci_cc_write_voice_setting(hdev, skb); + break; + + case HCI_OP_HOST_BUFFER_SIZE: + hci_cc_host_buffer_size(hdev, skb); + break; + + case HCI_OP_READ_LOCAL_VERSION: + hci_cc_read_local_version(hdev, skb); + break; + + case HCI_OP_READ_LOCAL_COMMANDS: + hci_cc_read_local_commands(hdev, skb); + break; + + case HCI_OP_READ_LOCAL_FEATURES: + hci_cc_read_local_features(hdev, skb); + break; + + case HCI_OP_READ_BUFFER_SIZE: + hci_cc_read_buffer_size(hdev, skb); + break; + + case HCI_OP_READ_BD_ADDR: + hci_cc_read_bd_addr(hdev, skb); + break; + + default: + BT_DBG("%s opcode 0x%x", hdev->name, opcode); + break; + } + + if (ev->ncmd) { + atomic_set(&hdev->cmd_cnt, 1); + if (!skb_queue_empty(&hdev->cmd_q)) + hci_sched_cmd(hdev); + } +} + +static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_ev_cmd_status *ev = (void *) skb->data; + __u16 opcode; + + skb_pull(skb, sizeof(*ev)); + + opcode = __le16_to_cpu(ev->opcode); + + switch (opcode) { + case HCI_OP_INQUIRY: + hci_cs_inquiry(hdev, ev->status); + break; + + case HCI_OP_CREATE_CONN: + hci_cs_create_conn(hdev, 
ev->status); + break; + + case HCI_OP_ADD_SCO: + hci_cs_add_sco(hdev, ev->status); + break; + + case HCI_OP_REMOTE_NAME_REQ: + hci_cs_remote_name_req(hdev, ev->status); + break; + + case HCI_OP_SETUP_SYNC_CONN: + hci_cs_setup_sync_conn(hdev, ev->status); + break; + + case HCI_OP_SNIFF_MODE: + hci_cs_sniff_mode(hdev, ev->status); + break; + + case HCI_OP_EXIT_SNIFF_MODE: + hci_cs_exit_sniff_mode(hdev, ev->status); + break; + + default: + BT_DBG("%s opcode 0x%x", hdev->name, opcode); + break; + } + + if (ev->ncmd) { + atomic_set(&hdev->cmd_cnt, 1); + if (!skb_queue_empty(&hdev->cmd_q)) + hci_sched_cmd(hdev); + } +} + +static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_ev_role_change *ev = (void *) skb->data; + struct hci_conn *conn; + + BT_DBG("%s status %d", hdev->name, ev->status); + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); + if (conn) { + if (!ev->status) { + if (ev->role) + conn->link_mode &= ~HCI_LM_MASTER; + else + conn->link_mode |= HCI_LM_MASTER; + } + + clear_bit(HCI_CONN_RSWITCH_PEND, &conn->pend); + + hci_role_switch_cfm(conn, ev->status, ev->role); + } + + hci_dev_unlock(hdev); +} + +static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_ev_num_comp_pkts *ev = (void *) skb->data; + __le16 *ptr; + int i; + + skb_pull(skb, sizeof(*ev)); + + BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl); + + if (skb->len < ev->num_hndl * 4) { + BT_DBG("%s bad parameters", hdev->name); + return; + } + + tasklet_disable(&hdev->tx_task); + + for (i = 0, ptr = (__le16 *) skb->data; i < ev->num_hndl; i++) { + struct hci_conn *conn; + __u16 handle, count; + + handle = __le16_to_cpu(get_unaligned(ptr++)); + count = __le16_to_cpu(get_unaligned(ptr++)); + + conn = hci_conn_hash_lookup_handle(hdev, handle); + if (conn) { + conn->sent -= count; + + if (conn->type == ACL_LINK) { + if ((hdev->acl_cnt += count) > hdev->acl_pkts) + hdev->acl_cnt = hdev->acl_pkts; + } else { + if ((hdev->sco_cnt += count) > hdev->sco_pkts) + hdev->sco_cnt = hdev->sco_pkts; + } + } + } + + hci_sched_tx(hdev); + + tasklet_enable(&hdev->tx_task); +} + +static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_ev_mode_change *ev = (void *) skb->data; struct hci_conn *conn; BT_DBG("%s status %d", hdev->name, ev->status); @@ -1073,17 +1184,39 @@ static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); - if (conn && !ev->status) { - memcpy(conn->features, ev->features, sizeof(conn->features)); + if (conn) { + conn->mode = ev->mode; + conn->interval = __le16_to_cpu(ev->interval); + + if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) { + if (conn->mode == HCI_CM_ACTIVE) + conn->power_save = 1; + else + conn->power_save = 0; + } } hci_dev_unlock(hdev); } -/* Clock Offset */ +static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb) +{ + BT_DBG("%s", hdev->name); +} + +static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb) +{ + BT_DBG("%s", hdev->name); +} + +static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb) +{ + BT_DBG("%s", hdev->name); +} + static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb) { - struct hci_ev_clock_offset *ev = (struct hci_ev_clock_offset *) skb->data; + struct hci_ev_clock_offset *ev = (void 
*) skb->data; struct hci_conn *conn; BT_DBG("%s status %d", hdev->name, ev->status); @@ -1103,10 +1236,9 @@ static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *sk hci_dev_unlock(hdev); } -/* Page Scan Repetition Mode */ static inline void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb) { - struct hci_ev_pscan_rep_mode *ev = (struct hci_ev_pscan_rep_mode *) skb->data; + struct hci_ev_pscan_rep_mode *ev = (void *) skb->data; struct inquiry_entry *ie; BT_DBG("%s", hdev->name); @@ -1121,10 +1253,91 @@ static inline void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff * hci_dev_unlock(hdev); } -/* Sniff Subrate */ +static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct inquiry_data data; + int num_rsp = *((__u8 *) skb->data); + + BT_DBG("%s num_rsp %d", hdev->name, num_rsp); + + if (!num_rsp) + return; + + hci_dev_lock(hdev); + + if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) { + struct inquiry_info_with_rssi_and_pscan_mode *info = (void *) (skb->data + 1); + + for (; num_rsp; num_rsp--) { + bacpy(&data.bdaddr, &info->bdaddr); + data.pscan_rep_mode = info->pscan_rep_mode; + data.pscan_period_mode = info->pscan_period_mode; + data.pscan_mode = info->pscan_mode; + memcpy(data.dev_class, info->dev_class, 3); + data.clock_offset = info->clock_offset; + data.rssi = info->rssi; + info++; + hci_inquiry_cache_update(hdev, &data); + } + } else { + struct inquiry_info_with_rssi *info = (void *) (skb->data + 1); + + for (; num_rsp; num_rsp--) { + bacpy(&data.bdaddr, &info->bdaddr); + data.pscan_rep_mode = info->pscan_rep_mode; + data.pscan_period_mode = info->pscan_period_mode; + data.pscan_mode = 0x00; + memcpy(data.dev_class, info->dev_class, 3); + data.clock_offset = info->clock_offset; + data.rssi = info->rssi; + info++; + hci_inquiry_cache_update(hdev, &data); + } + } + + hci_dev_unlock(hdev); +} + +static inline void hci_remote_ext_features_evt(struct hci_dev *hdev, struct sk_buff *skb) +{ + BT_DBG("%s", hdev->name); +} + +static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_ev_sync_conn_complete *ev = (void *) skb->data; + struct hci_conn *conn; + + BT_DBG("%s status %d", hdev->name, ev->status); + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr); + if (!conn) + goto unlock; + + if (!ev->status) { + conn->handle = __le16_to_cpu(ev->handle); + conn->state = BT_CONNECTED; + } else + conn->state = BT_CLOSED; + + hci_proto_connect_cfm(conn, ev->status); + if (ev->status) + hci_conn_del(conn); + +unlock: + hci_dev_unlock(hdev); +} + +static inline void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buff *skb) +{ + BT_DBG("%s", hdev->name); +} + static inline void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb) { - struct hci_ev_sniff_subrate *ev = (struct hci_ev_sniff_subrate *) skb->data; + struct hci_ev_sniff_subrate *ev = (void *) skb->data; struct hci_conn *conn; BT_DBG("%s status %d", hdev->name, ev->status); @@ -1138,22 +1351,42 @@ static inline void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *s hci_dev_unlock(hdev); } -void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb) +static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb) { - struct hci_event_hdr *hdr = (struct hci_event_hdr *) skb->data; - struct hci_ev_cmd_complete *ec; - struct hci_ev_cmd_status *cs; - u16 opcode, ocf, ogf; 
+ struct inquiry_data data; + struct extended_inquiry_info *info = (void *) (skb->data + 1); + int num_rsp = *((__u8 *) skb->data); - skb_pull(skb, HCI_EVENT_HDR_SIZE); + BT_DBG("%s num_rsp %d", hdev->name, num_rsp); - BT_DBG("%s evt 0x%x", hdev->name, hdr->evt); + if (!num_rsp) + return; - switch (hdr->evt) { - case HCI_EV_NUM_COMP_PKTS: - hci_num_comp_pkts_evt(hdev, skb); - break; + hci_dev_lock(hdev); + + for (; num_rsp; num_rsp--) { + bacpy(&data.bdaddr, &info->bdaddr); + data.pscan_rep_mode = info->pscan_rep_mode; + data.pscan_period_mode = info->pscan_period_mode; + data.pscan_mode = 0x00; + memcpy(data.dev_class, info->dev_class, 3); + data.clock_offset = info->clock_offset; + data.rssi = info->rssi; + info++; + hci_inquiry_cache_update(hdev, &data); + } + hci_dev_unlock(hdev); +} + +void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_event_hdr *hdr = (void *) skb->data; + __u8 event = hdr->evt; + + skb_pull(skb, HCI_EVENT_HDR_SIZE); + + switch (event) { case HCI_EV_INQUIRY_COMPLETE: hci_inquiry_complete_evt(hdev, skb); break; @@ -1162,44 +1395,64 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb) hci_inquiry_result_evt(hdev, skb); break; - case HCI_EV_INQUIRY_RESULT_WITH_RSSI: - hci_inquiry_result_with_rssi_evt(hdev, skb); - break; - - case HCI_EV_EXTENDED_INQUIRY_RESULT: - hci_extended_inquiry_result_evt(hdev, skb); + case HCI_EV_CONN_COMPLETE: + hci_conn_complete_evt(hdev, skb); break; case HCI_EV_CONN_REQUEST: hci_conn_request_evt(hdev, skb); break; - case HCI_EV_CONN_COMPLETE: - hci_conn_complete_evt(hdev, skb); - break; - case HCI_EV_DISCONN_COMPLETE: hci_disconn_complete_evt(hdev, skb); break; - case HCI_EV_ROLE_CHANGE: - hci_role_change_evt(hdev, skb); - break; - - case HCI_EV_MODE_CHANGE: - hci_mode_change_evt(hdev, skb); - break; - case HCI_EV_AUTH_COMPLETE: hci_auth_complete_evt(hdev, skb); break; + case HCI_EV_REMOTE_NAME: + hci_remote_name_evt(hdev, skb); + break; + case HCI_EV_ENCRYPT_CHANGE: hci_encrypt_change_evt(hdev, skb); break; - case HCI_EV_CHANGE_CONN_LINK_KEY_COMPLETE: - hci_change_conn_link_key_complete_evt(hdev, skb); + case HCI_EV_CHANGE_LINK_KEY_COMPLETE: + hci_change_link_key_complete_evt(hdev, skb); + break; + + case HCI_EV_REMOTE_FEATURES: + hci_remote_features_evt(hdev, skb); + break; + + case HCI_EV_REMOTE_VERSION: + hci_remote_version_evt(hdev, skb); + break; + + case HCI_EV_QOS_SETUP_COMPLETE: + hci_qos_setup_complete_evt(hdev, skb); + break; + + case HCI_EV_CMD_COMPLETE: + hci_cmd_complete_evt(hdev, skb); + break; + + case HCI_EV_CMD_STATUS: + hci_cmd_status_evt(hdev, skb); + break; + + case HCI_EV_ROLE_CHANGE: + hci_role_change_evt(hdev, skb); + break; + + case HCI_EV_NUM_COMP_PKTS: + hci_num_comp_pkts_evt(hdev, skb); + break; + + case HCI_EV_MODE_CHANGE: + hci_mode_change_evt(hdev, skb); break; case HCI_EV_PIN_CODE_REQ: @@ -1214,10 +1467,6 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb) hci_link_key_notify_evt(hdev, skb); break; - case HCI_EV_REMOTE_FEATURES: - hci_remote_features_evt(hdev, skb); - break; - case HCI_EV_CLOCK_OFFSET: hci_clock_offset_evt(hdev, skb); break; @@ -1226,82 +1475,32 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb) hci_pscan_rep_mode_evt(hdev, skb); break; - case HCI_EV_SNIFF_SUBRATE: - hci_sniff_subrate_evt(hdev, skb); + case HCI_EV_INQUIRY_RESULT_WITH_RSSI: + hci_inquiry_result_with_rssi_evt(hdev, skb); break; - case HCI_EV_CMD_STATUS: - cs = (struct hci_ev_cmd_status *) skb->data; - skb_pull(skb, sizeof(cs)); - - opcode = 
__le16_to_cpu(cs->opcode); - ogf = hci_opcode_ogf(opcode); - ocf = hci_opcode_ocf(opcode); - - switch (ogf) { - case OGF_INFO_PARAM: - hci_cs_info_param(hdev, ocf, cs->status); - break; - - case OGF_HOST_CTL: - hci_cs_host_ctl(hdev, ocf, cs->status); - break; - - case OGF_LINK_CTL: - hci_cs_link_ctl(hdev, ocf, cs->status); - break; - - case OGF_LINK_POLICY: - hci_cs_link_policy(hdev, ocf, cs->status); - break; - - default: - BT_DBG("%s Command Status OGF %x", hdev->name, ogf); - break; - } - - if (cs->ncmd) { - atomic_set(&hdev->cmd_cnt, 1); - if (!skb_queue_empty(&hdev->cmd_q)) - hci_sched_cmd(hdev); - } + case HCI_EV_REMOTE_EXT_FEATURES: + hci_remote_ext_features_evt(hdev, skb); break; - case HCI_EV_CMD_COMPLETE: - ec = (struct hci_ev_cmd_complete *) skb->data; - skb_pull(skb, sizeof(*ec)); - - opcode = __le16_to_cpu(ec->opcode); - ogf = hci_opcode_ogf(opcode); - ocf = hci_opcode_ocf(opcode); - - switch (ogf) { - case OGF_INFO_PARAM: - hci_cc_info_param(hdev, ocf, skb); - break; - - case OGF_HOST_CTL: - hci_cc_host_ctl(hdev, ocf, skb); - break; + case HCI_EV_SYNC_CONN_COMPLETE: + hci_sync_conn_complete_evt(hdev, skb); + break; - case OGF_LINK_CTL: - hci_cc_link_ctl(hdev, ocf, skb); - break; + case HCI_EV_SYNC_CONN_CHANGED: + hci_sync_conn_changed_evt(hdev, skb); + break; - case OGF_LINK_POLICY: - hci_cc_link_policy(hdev, ocf, skb); - break; + case HCI_EV_SNIFF_SUBRATE: + hci_sniff_subrate_evt(hdev, skb); + break; - default: - BT_DBG("%s Command Completed OGF %x", hdev->name, ogf); - break; - } + case HCI_EV_EXTENDED_INQUIRY_RESULT: + hci_extended_inquiry_result_evt(hdev, skb); + break; - if (ec->ncmd) { - atomic_set(&hdev->cmd_cnt, 1); - if (!skb_queue_empty(&hdev->cmd_q)) - hci_sched_cmd(hdev); - } + default: + BT_DBG("%s event 0x%x", hdev->name, event); break; } diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c index 43dd6373bff9..8825102c517c 100644 --- a/net/bluetooth/hci_sock.c +++ b/net/bluetooth/hci_sock.c @@ -451,7 +451,7 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock, goto drop; } - if (test_bit(HCI_RAW, &hdev->flags) || (ogf == OGF_VENDOR_CMD)) { + if (test_bit(HCI_RAW, &hdev->flags) || (ogf == 0x3f)) { skb_queue_tail(&hdev->raw_q, skb); hci_sched_tx(hdev); } else { diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c index 25835403d659..cef1e3e1881c 100644 --- a/net/bluetooth/hci_sysfs.c +++ b/net/bluetooth/hci_sysfs.c @@ -41,6 +41,26 @@ static ssize_t show_type(struct device *dev, struct device_attribute *attr, char return sprintf(buf, "%s\n", typetostr(hdev->type)); } +static ssize_t show_name(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct hci_dev *hdev = dev_get_drvdata(dev); + char name[249]; + int i; + + for (i = 0; i < 248; i++) + name[i] = hdev->dev_name[i]; + + name[248] = '\0'; + return sprintf(buf, "%s\n", name); +} + +static ssize_t show_class(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct hci_dev *hdev = dev_get_drvdata(dev); + return sprintf(buf, "0x%.2x%.2x%.2x\n", + hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]); +} + static ssize_t show_address(struct device *dev, struct device_attribute *attr, char *buf) { struct hci_dev *hdev = dev_get_drvdata(dev); @@ -49,6 +69,17 @@ static ssize_t show_address(struct device *dev, struct device_attribute *attr, c return sprintf(buf, "%s\n", batostr(&bdaddr)); } +static ssize_t show_features(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct hci_dev *hdev = dev_get_drvdata(dev); + + 
return sprintf(buf, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n", + hdev->features[0], hdev->features[1], + hdev->features[2], hdev->features[3], + hdev->features[4], hdev->features[5], + hdev->features[6], hdev->features[7]); +} + static ssize_t show_manufacturer(struct device *dev, struct device_attribute *attr, char *buf) { struct hci_dev *hdev = dev_get_drvdata(dev); @@ -170,7 +201,10 @@ static ssize_t store_sniff_min_interval(struct device *dev, struct device_attrib } static DEVICE_ATTR(type, S_IRUGO, show_type, NULL); +static DEVICE_ATTR(name, S_IRUGO, show_name, NULL); +static DEVICE_ATTR(class, S_IRUGO, show_class, NULL); static DEVICE_ATTR(address, S_IRUGO, show_address, NULL); +static DEVICE_ATTR(features, S_IRUGO, show_features, NULL); static DEVICE_ATTR(manufacturer, S_IRUGO, show_manufacturer, NULL); static DEVICE_ATTR(hci_version, S_IRUGO, show_hci_version, NULL); static DEVICE_ATTR(hci_revision, S_IRUGO, show_hci_revision, NULL); @@ -185,7 +219,10 @@ static DEVICE_ATTR(sniff_min_interval, S_IRUGO | S_IWUSR, static struct device_attribute *bt_attrs[] = { &dev_attr_type, + &dev_attr_name, + &dev_attr_class, &dev_attr_address, + &dev_attr_features, &dev_attr_manufacturer, &dev_attr_hci_version, &dev_attr_hci_revision, diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c index 66c736953cfe..4bbacddeb49d 100644 --- a/net/bluetooth/hidp/core.c +++ b/net/bluetooth/hidp/core.c @@ -247,7 +247,7 @@ static inline int hidp_queue_report(struct hidp_session *session, unsigned char { struct sk_buff *skb; - BT_DBG("session %p hid %p data %p size %d", session, device, data, size); + BT_DBG("session %p hid %p data %p size %d", session, session->hid, data, size); if (!(skb = alloc_skb(size + 1, GFP_ATOMIC))) { BT_ERR("Can't allocate memory for new frame"); diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c index 36ef27b625db..6fbbae78b304 100644 --- a/net/bluetooth/l2cap.c +++ b/net/bluetooth/l2cap.c @@ -55,7 +55,9 @@ #define BT_DBG(D...) #endif -#define VERSION "2.8" +#define VERSION "2.9" + +static u32 l2cap_feat_mask = 0x0000; static const struct proto_ops l2cap_sock_ops; @@ -258,7 +260,119 @@ static void l2cap_chan_del(struct sock *sk, int err) sk->sk_state_change(sk); } +static inline u8 l2cap_get_ident(struct l2cap_conn *conn) +{ + u8 id; + + /* Get next available identificator. + * 1 - 128 are used by kernel. + * 129 - 199 are reserved. + * 200 - 254 are used by utilities like l2ping, etc. 
+ */ + + spin_lock_bh(&conn->lock); + + if (++conn->tx_ident > 128) + conn->tx_ident = 1; + + id = conn->tx_ident; + + spin_unlock_bh(&conn->lock); + + return id; +} + +static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data) +{ + struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data); + + BT_DBG("code 0x%2.2x", code); + + if (!skb) + return -ENOMEM; + + return hci_send_acl(conn->hcon, skb, 0); +} + /* ---- L2CAP connections ---- */ +static void l2cap_conn_start(struct l2cap_conn *conn) +{ + struct l2cap_chan_list *l = &conn->chan_list; + struct sock *sk; + + BT_DBG("conn %p", conn); + + read_lock(&l->lock); + + for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) { + bh_lock_sock(sk); + + if (sk->sk_type != SOCK_SEQPACKET) { + l2cap_sock_clear_timer(sk); + sk->sk_state = BT_CONNECTED; + sk->sk_state_change(sk); + } else if (sk->sk_state == BT_CONNECT) { + struct l2cap_conn_req req; + l2cap_pi(sk)->ident = l2cap_get_ident(conn); + req.scid = cpu_to_le16(l2cap_pi(sk)->scid); + req.psm = l2cap_pi(sk)->psm; + l2cap_send_cmd(conn, l2cap_pi(sk)->ident, + L2CAP_CONN_REQ, sizeof(req), &req); + } + + bh_unlock_sock(sk); + } + + read_unlock(&l->lock); +} + +static void l2cap_conn_ready(struct l2cap_conn *conn) +{ + BT_DBG("conn %p", conn); + + if (conn->chan_list.head || !hlist_empty(&l2cap_sk_list.head)) { + struct l2cap_info_req req; + + req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK); + + conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT; + conn->info_ident = l2cap_get_ident(conn); + + mod_timer(&conn->info_timer, + jiffies + msecs_to_jiffies(L2CAP_INFO_TIMEOUT)); + + l2cap_send_cmd(conn, conn->info_ident, + L2CAP_INFO_REQ, sizeof(req), &req); + } +} + +/* Notify sockets that we cannot guaranty reliability anymore */ +static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err) +{ + struct l2cap_chan_list *l = &conn->chan_list; + struct sock *sk; + + BT_DBG("conn %p", conn); + + read_lock(&l->lock); + + for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) { + if (l2cap_pi(sk)->link_mode & L2CAP_LM_RELIABLE) + sk->sk_err = err; + } + + read_unlock(&l->lock); +} + +static void l2cap_info_timeout(unsigned long arg) +{ + struct l2cap_conn *conn = (void *) arg; + + conn->info_ident = 0; + + l2cap_conn_start(conn); +} + static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status) { struct l2cap_conn *conn = hcon->l2cap_data; @@ -279,6 +393,12 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status) conn->src = &hcon->hdev->bdaddr; conn->dst = &hcon->dst; + conn->feat_mask = 0; + + init_timer(&conn->info_timer); + conn->info_timer.function = l2cap_info_timeout; + conn->info_timer.data = (unsigned long) conn; + spin_lock_init(&conn->lock); rwlock_init(&conn->chan_list.lock); @@ -318,40 +438,6 @@ static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, stru write_unlock_bh(&l->lock); } -static inline u8 l2cap_get_ident(struct l2cap_conn *conn) -{ - u8 id; - - /* Get next available identificator. - * 1 - 128 are used by kernel. - * 129 - 199 are reserved. - * 200 - 254 are used by utilities like l2ping, etc. 
- */ - - spin_lock_bh(&conn->lock); - - if (++conn->tx_ident > 128) - conn->tx_ident = 1; - - id = conn->tx_ident; - - spin_unlock_bh(&conn->lock); - - return id; -} - -static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data) -{ - struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data); - - BT_DBG("code 0x%2.2x", code); - - if (!skb) - return -ENOMEM; - - return hci_send_acl(conn->hcon, skb, 0); -} - /* ---- Socket interface ---- */ static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src) { @@ -508,7 +594,6 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent) /* Default config options */ pi->conf_len = 0; - pi->conf_mtu = L2CAP_DEFAULT_MTU; pi->flush_to = L2CAP_DEFAULT_FLUSH_TO; } @@ -530,7 +615,7 @@ static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int p INIT_LIST_HEAD(&bt_sk(sk)->accept_q); sk->sk_destruct = l2cap_sock_destruct; - sk->sk_sndtimeo = L2CAP_CONN_TIMEOUT; + sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT); sock_reset_flag(sk, SOCK_ZAPPED); @@ -650,6 +735,11 @@ static int l2cap_do_connect(struct sock *sk) l2cap_sock_set_timer(sk, sk->sk_sndtimeo); if (hcon->state == BT_CONNECTED) { + if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) { + l2cap_conn_ready(conn); + goto done; + } + if (sk->sk_type == SOCK_SEQPACKET) { struct l2cap_conn_req req; l2cap_pi(sk)->ident = l2cap_get_ident(conn); @@ -958,7 +1048,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch opts.imtu = l2cap_pi(sk)->imtu; opts.omtu = l2cap_pi(sk)->omtu; opts.flush_to = l2cap_pi(sk)->flush_to; - opts.mode = 0x00; + opts.mode = L2CAP_MODE_BASIC; len = min_t(unsigned int, sizeof(opts), optlen); if (copy_from_user((char *) &opts, optval, len)) { @@ -1007,7 +1097,7 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch opts.imtu = l2cap_pi(sk)->imtu; opts.omtu = l2cap_pi(sk)->omtu; opts.flush_to = l2cap_pi(sk)->flush_to; - opts.mode = 0x00; + opts.mode = L2CAP_MODE_BASIC; len = min_t(unsigned int, len, sizeof(opts)); if (copy_to_user(optval, (char *) &opts, len)) @@ -1084,52 +1174,6 @@ static int l2cap_sock_release(struct socket *sock) return err; } -static void l2cap_conn_ready(struct l2cap_conn *conn) -{ - struct l2cap_chan_list *l = &conn->chan_list; - struct sock *sk; - - BT_DBG("conn %p", conn); - - read_lock(&l->lock); - - for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) { - bh_lock_sock(sk); - - if (sk->sk_type != SOCK_SEQPACKET) { - l2cap_sock_clear_timer(sk); - sk->sk_state = BT_CONNECTED; - sk->sk_state_change(sk); - } else if (sk->sk_state == BT_CONNECT) { - struct l2cap_conn_req req; - l2cap_pi(sk)->ident = l2cap_get_ident(conn); - req.scid = cpu_to_le16(l2cap_pi(sk)->scid); - req.psm = l2cap_pi(sk)->psm; - l2cap_send_cmd(conn, l2cap_pi(sk)->ident, L2CAP_CONN_REQ, sizeof(req), &req); - } - - bh_unlock_sock(sk); - } - - read_unlock(&l->lock); -} - -/* Notify sockets that we cannot guaranty reliability anymore */ -static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err) -{ - struct l2cap_chan_list *l = &conn->chan_list; - struct sock *sk; - - BT_DBG("conn %p", conn); - - read_lock(&l->lock); - for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) { - if (l2cap_pi(sk)->link_mode & L2CAP_LM_RELIABLE) - sk->sk_err = err; - } - read_unlock(&l->lock); -} - static void l2cap_chan_ready(struct sock *sk) { struct sock *parent = bt_sk(sk)->parent; @@ -1256,11 +1300,11 @@ static inline int l2cap_get_conf_opt(void **ptr, int 
*type, int *olen, unsigned break; case 2: - *val = __le16_to_cpu(*((__le16 *)opt->val)); + *val = __le16_to_cpu(*((__le16 *) opt->val)); break; case 4: - *val = __le32_to_cpu(*((__le32 *)opt->val)); + *val = __le32_to_cpu(*((__le32 *) opt->val)); break; default: @@ -1332,6 +1376,8 @@ static int l2cap_parse_conf_req(struct sock *sk, void *data) int len = pi->conf_len; int type, hint, olen; unsigned long val; + struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC }; + u16 mtu = L2CAP_DEFAULT_MTU; u16 result = L2CAP_CONF_SUCCESS; BT_DBG("sk %p", sk); @@ -1344,7 +1390,7 @@ static int l2cap_parse_conf_req(struct sock *sk, void *data) switch (type) { case L2CAP_CONF_MTU: - pi->conf_mtu = val; + mtu = val; break; case L2CAP_CONF_FLUSH_TO: @@ -1354,6 +1400,11 @@ static int l2cap_parse_conf_req(struct sock *sk, void *data) case L2CAP_CONF_QOS: break; + case L2CAP_CONF_RFC: + if (olen == sizeof(rfc)) + memcpy(&rfc, (void *) val, olen); + break; + default: if (hint) break; @@ -1368,12 +1419,24 @@ static int l2cap_parse_conf_req(struct sock *sk, void *data) /* Configure output options and let the other side know * which ones we don't like. */ - if (pi->conf_mtu < pi->omtu) + if (rfc.mode == L2CAP_MODE_BASIC) { + if (mtu < pi->omtu) + result = L2CAP_CONF_UNACCEPT; + else { + pi->omtu = mtu; + pi->conf_state |= L2CAP_CONF_OUTPUT_DONE; + } + + l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu); + } else { result = L2CAP_CONF_UNACCEPT; - else - pi->omtu = pi->conf_mtu; - l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu); + memset(&rfc, 0, sizeof(rfc)); + rfc.mode = L2CAP_MODE_BASIC; + + l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, + sizeof(rfc), (unsigned long) &rfc); + } } rsp->scid = cpu_to_le16(pi->dcid); @@ -1397,6 +1460,23 @@ static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 fla return ptr - data; } +static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data) +{ + struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data; + + if (rej->reason != 0x0000) + return 0; + + if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) && + cmd->ident == conn->info_ident) { + conn->info_ident = 0; + del_timer(&conn->info_timer); + l2cap_conn_start(conn); + } + + return 0; +} + static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data) { struct l2cap_chan_list *list = &conn->chan_list; @@ -1577,16 +1657,19 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp); - /* Output config done. */ - l2cap_pi(sk)->conf_state |= L2CAP_CONF_OUTPUT_DONE; - /* Reset config buffer. 
*/ l2cap_pi(sk)->conf_len = 0; + if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE)) + goto unlock; + if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) { sk->sk_state = BT_CONNECTED; l2cap_chan_ready(sk); - } else if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) { + goto unlock; + } + + if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) { u8 req[64]; l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, l2cap_build_conf_req(sk, req), req); @@ -1646,7 +1729,6 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr if (flags & 0x01) goto done; - /* Input config done */ l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE; if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) { @@ -1711,16 +1793,27 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data) { struct l2cap_info_req *req = (struct l2cap_info_req *) data; - struct l2cap_info_rsp rsp; u16 type; type = __le16_to_cpu(req->type); BT_DBG("type 0x%4.4x", type); - rsp.type = cpu_to_le16(type); - rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP); - l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp), &rsp); + if (type == L2CAP_IT_FEAT_MASK) { + u8 buf[8]; + struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf; + rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK); + rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS); + put_unaligned(cpu_to_le32(l2cap_feat_mask), (__le32 *) rsp->data); + l2cap_send_cmd(conn, cmd->ident, + L2CAP_INFO_RSP, sizeof(buf), buf); + } else { + struct l2cap_info_rsp rsp; + rsp.type = cpu_to_le16(type); + rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP); + l2cap_send_cmd(conn, cmd->ident, + L2CAP_INFO_RSP, sizeof(rsp), &rsp); + } return 0; } @@ -1735,6 +1828,15 @@ static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cm BT_DBG("type 0x%4.4x result 0x%2.2x", type, result); + conn->info_ident = 0; + + del_timer(&conn->info_timer); + + if (type == L2CAP_IT_FEAT_MASK) + conn->feat_mask = __le32_to_cpu(get_unaligned((__le32 *) rsp->data)); + + l2cap_conn_start(conn); + return 0; } @@ -1764,7 +1866,7 @@ static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *sk switch (cmd.code) { case L2CAP_COMMAND_REJ: - /* FIXME: We should process this */ + l2cap_command_rej(conn, &cmd, data); break; case L2CAP_CONN_REQ: diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c index bb7220770f2c..e7ac6ba7ecab 100644 --- a/net/bluetooth/rfcomm/core.c +++ b/net/bluetooth/rfcomm/core.c @@ -33,11 +33,11 @@ #include <linux/sched.h> #include <linux/signal.h> #include <linux/init.h> -#include <linux/freezer.h> #include <linux/wait.h> #include <linux/device.h> #include <linux/net.h> #include <linux/mutex.h> +#include <linux/kthread.h> #include <net/sock.h> #include <asm/uaccess.h> @@ -68,7 +68,6 @@ static DEFINE_MUTEX(rfcomm_mutex); static unsigned long rfcomm_event; static LIST_HEAD(session_list); -static atomic_t terminate, running; static int rfcomm_send_frame(struct rfcomm_session *s, u8 *data, int len); static int rfcomm_send_sabm(struct rfcomm_session *s, u8 dlci); @@ -1850,26 +1849,6 @@ static inline void rfcomm_process_sessions(void) rfcomm_unlock(); } -static void rfcomm_worker(void) -{ - BT_DBG(""); - - while (!atomic_read(&terminate)) { - set_current_state(TASK_INTERRUPTIBLE); - if (!test_bit(RFCOMM_SCHED_WAKEUP, &rfcomm_event)) { - /* No pending events. Let's sleep. 
- * Incoming connections and data will wake us up. */ - schedule(); - } - set_current_state(TASK_RUNNING); - - /* Process stuff */ - clear_bit(RFCOMM_SCHED_WAKEUP, &rfcomm_event); - rfcomm_process_sessions(); - } - return; -} - static int rfcomm_add_listener(bdaddr_t *ba) { struct sockaddr_l2 addr; @@ -1935,22 +1914,28 @@ static void rfcomm_kill_listener(void) static int rfcomm_run(void *unused) { - rfcomm_thread = current; - - atomic_inc(&running); + BT_DBG(""); - daemonize("krfcommd"); set_user_nice(current, -10); - BT_DBG(""); - rfcomm_add_listener(BDADDR_ANY); - rfcomm_worker(); + while (!kthread_should_stop()) { + set_current_state(TASK_INTERRUPTIBLE); + if (!test_bit(RFCOMM_SCHED_WAKEUP, &rfcomm_event)) { + /* No pending events. Let's sleep. + * Incoming connections and data will wake us up. */ + schedule(); + } + set_current_state(TASK_RUNNING); + + /* Process stuff */ + clear_bit(RFCOMM_SCHED_WAKEUP, &rfcomm_event); + rfcomm_process_sessions(); + } rfcomm_kill_listener(); - atomic_dec(&running); return 0; } @@ -2059,7 +2044,11 @@ static int __init rfcomm_init(void) hci_register_cb(&rfcomm_cb); - kernel_thread(rfcomm_run, NULL, CLONE_KERNEL); + rfcomm_thread = kthread_run(rfcomm_run, NULL, "krfcommd"); + if (IS_ERR(rfcomm_thread)) { + hci_unregister_cb(&rfcomm_cb); + return PTR_ERR(rfcomm_thread); + } if (class_create_file(bt_class, &class_attr_rfcomm_dlc) < 0) BT_ERR("Failed to create RFCOMM info file"); @@ -2081,14 +2070,7 @@ static void __exit rfcomm_exit(void) hci_unregister_cb(&rfcomm_cb); - /* Terminate working thread. - * ie. Set terminate flag and wake it up */ - atomic_inc(&terminate); - rfcomm_schedule(RFCOMM_SCHED_STATE); - - /* Wait until thread is running */ - while (atomic_read(&running)) - schedule(); + kthread_stop(rfcomm_thread); #ifdef CONFIG_BT_RFCOMM_TTY rfcomm_cleanup_ttys(); diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c index 22a832098d44..e447651a2dbe 100644 --- a/net/bluetooth/rfcomm/tty.c +++ b/net/bluetooth/rfcomm/tty.c @@ -189,6 +189,23 @@ static struct device *rfcomm_get_device(struct rfcomm_dev *dev) return conn ? 
&conn->dev : NULL; } +static ssize_t show_address(struct device *tty_dev, struct device_attribute *attr, char *buf) +{ + struct rfcomm_dev *dev = dev_get_drvdata(tty_dev); + bdaddr_t bdaddr; + baswap(&bdaddr, &dev->dst); + return sprintf(buf, "%s\n", batostr(&bdaddr)); +} + +static ssize_t show_channel(struct device *tty_dev, struct device_attribute *attr, char *buf) +{ + struct rfcomm_dev *dev = dev_get_drvdata(tty_dev); + return sprintf(buf, "%d\n", dev->channel); +} + +static DEVICE_ATTR(address, S_IRUGO, show_address, NULL); +static DEVICE_ATTR(channel, S_IRUGO, show_channel, NULL); + static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc) { struct rfcomm_dev *dev; @@ -281,6 +298,14 @@ out: return err; } + dev_set_drvdata(dev->tty_dev, dev); + + if (device_create_file(dev->tty_dev, &dev_attr_address) < 0) + BT_ERR("Failed to create address attribute"); + + if (device_create_file(dev->tty_dev, &dev_attr_channel) < 0) + BT_ERR("Failed to create channel attribute"); + return dev->id; } diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index 65b6fb1c4154..82d0dfdfa7e2 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c @@ -189,7 +189,7 @@ static int sco_connect(struct sock *sk) struct sco_conn *conn; struct hci_conn *hcon; struct hci_dev *hdev; - int err = 0; + int err, type; BT_DBG("%s -> %s", batostr(src), batostr(dst)); @@ -200,7 +200,9 @@ static int sco_connect(struct sock *sk) err = -ENOMEM; - hcon = hci_connect(hdev, SCO_LINK, dst); + type = lmp_esco_capable(hdev) ? ESCO_LINK : SCO_LINK; + + hcon = hci_connect(hdev, type, dst); if (!hcon) goto done; @@ -224,6 +226,7 @@ static int sco_connect(struct sock *sk) sk->sk_state = BT_CONNECT; sco_sock_set_timer(sk, sk->sk_sndtimeo); } + done: hci_dev_unlock_bh(hdev); hci_dev_put(hdev); @@ -846,7 +849,7 @@ static int sco_connect_cfm(struct hci_conn *hcon, __u8 status) { BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status); - if (hcon->type != SCO_LINK) + if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK) return 0; if (!status) { @@ -865,10 +868,11 @@ static int sco_disconn_ind(struct hci_conn *hcon, __u8 reason) { BT_DBG("hcon %p reason %d", hcon, reason); - if (hcon->type != SCO_LINK) + if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK) return 0; sco_conn_del(hcon, bt_err(reason)); + return 0; } diff --git a/net/core/dev.c b/net/core/dev.c index 38b03da5c1ca..872658927e47 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1553,7 +1553,7 @@ gso: return rc; } if (unlikely((netif_queue_stopped(dev) || - netif_subqueue_stopped(dev, skb->queue_mapping)) && + netif_subqueue_stopped(dev, skb)) && skb->next)) return NETDEV_TX_BUSY; } while (skb->next); @@ -1661,7 +1661,7 @@ gso: q = dev->qdisc; if (q->enqueue) { /* reset queue_mapping to zero */ - skb->queue_mapping = 0; + skb_set_queue_mapping(skb, 0); rc = q->enqueue(skb, q); qdisc_run(dev); spin_unlock(&dev->queue_lock); @@ -1692,7 +1692,7 @@ gso: HARD_TX_LOCK(dev, cpu); if (!netif_queue_stopped(dev) && - !netif_subqueue_stopped(dev, skb->queue_mapping)) { + !netif_subqueue_stopped(dev, skb)) { rc = 0; if (!dev_hard_start_xmit(skb, dev)) { HARD_TX_UNLOCK(dev); diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 67ba9914e52e..05979e356963 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -1438,6 +1438,9 @@ int neigh_table_clear(struct neigh_table *tbl) free_percpu(tbl->stats); tbl->stats = NULL; + kmem_cache_destroy(tbl->kmem_cachep); + tbl->kmem_cachep = NULL; + return 0; } diff --git a/net/core/netpoll.c 
b/net/core/netpoll.c index 95daba624967..bf8d18f1b013 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c @@ -67,7 +67,7 @@ static void queue_process(struct work_struct *work) local_irq_save(flags); netif_tx_lock(dev); if ((netif_queue_stopped(dev) || - netif_subqueue_stopped(dev, skb->queue_mapping)) || + netif_subqueue_stopped(dev, skb)) || dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK) { skb_queue_head(&npinfo->txq, skb); netif_tx_unlock(dev); @@ -269,7 +269,7 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb) tries > 0; --tries) { if (netif_tx_trylock(dev)) { if (!netif_queue_stopped(dev) && - !netif_subqueue_stopped(dev, skb->queue_mapping)) + !netif_subqueue_stopped(dev, skb)) status = dev->hard_start_xmit(skb, dev); netif_tx_unlock(dev); diff --git a/net/core/pktgen.c b/net/core/pktgen.c index c4719edb55c0..de33f36947e9 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -2603,8 +2603,7 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev, skb->network_header = skb->tail; skb->transport_header = skb->network_header + sizeof(struct iphdr); skb_put(skb, sizeof(struct iphdr) + sizeof(struct udphdr)); - skb->queue_mapping = pkt_dev->cur_queue_map; - + skb_set_queue_mapping(skb, pkt_dev->cur_queue_map); iph = ip_hdr(skb); udph = udp_hdr(skb); @@ -2941,8 +2940,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev, skb->network_header = skb->tail; skb->transport_header = skb->network_header + sizeof(struct ipv6hdr); skb_put(skb, sizeof(struct ipv6hdr) + sizeof(struct udphdr)); - skb->queue_mapping = pkt_dev->cur_queue_map; - + skb_set_queue_mapping(skb, pkt_dev->cur_queue_map); iph = ipv6_hdr(skb); udph = udp_hdr(skb); @@ -3385,7 +3383,7 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev) if ((netif_queue_stopped(odev) || (pkt_dev->skb && - netif_subqueue_stopped(odev, pkt_dev->skb->queue_mapping))) || + netif_subqueue_stopped(odev, pkt_dev->skb))) || need_resched()) { idle_start = getCurUs(); @@ -3402,7 +3400,7 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev) pkt_dev->idle_acc += getCurUs() - idle_start; if (netif_queue_stopped(odev) || - netif_subqueue_stopped(odev, pkt_dev->skb->queue_mapping)) { + netif_subqueue_stopped(odev, pkt_dev->skb)) { pkt_dev->next_tx_us = getCurUs(); /* TODO */ pkt_dev->next_tx_ns = 0; goto out; /* Try the next interface */ @@ -3431,7 +3429,7 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev) netif_tx_lock_bh(odev); if (!netif_queue_stopped(odev) && - !netif_subqueue_stopped(odev, pkt_dev->skb->queue_mapping)) { + !netif_subqueue_stopped(odev, pkt_dev->skb)) { atomic_inc(&(pkt_dev->skb->users)); retry_now: diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 70d9b5da96ae..4e2c84fcf276 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -2045,7 +2045,7 @@ skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) if (copy > 0) { if (copy > len) copy = len; - sg[elt].page = virt_to_page(skb->data + offset); + sg_set_page(&sg[elt], virt_to_page(skb->data + offset)); sg[elt].offset = (unsigned long)(skb->data + offset) % PAGE_SIZE; sg[elt].length = copy; elt++; @@ -2065,7 +2065,7 @@ skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) if (copy > len) copy = len; - sg[elt].page = frag->page; + sg_set_page(&sg[elt], frag->page); sg[elt].offset = frag->page_offset+offset-start; sg[elt].length = copy; elt++; diff --git a/net/dccp/diag.c b/net/dccp/diag.c index 0f3745585a94..d8a3509b26f6 100644 --- 
a/net/dccp/diag.c +++ b/net/dccp/diag.c @@ -68,3 +68,4 @@ module_exit(dccp_diag_fini); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@mandriva.com>"); MODULE_DESCRIPTION("DCCP inet_diag handler"); +MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_INET_DIAG, DCCPDIAG_GETSOCK); diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index 44f6e17e105f..222549ab274a 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c @@ -1037,8 +1037,8 @@ module_exit(dccp_v4_exit); * values directly, Also cover the case where the protocol is not specified, * i.e. net-pf-PF_INET-proto-0-type-SOCK_DCCP */ -MODULE_ALIAS("net-pf-" __stringify(PF_INET) "-proto-33-type-6"); -MODULE_ALIAS("net-pf-" __stringify(PF_INET) "-proto-0-type-6"); +MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, 33, 6); +MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, 0, 6); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@mandriva.com>"); MODULE_DESCRIPTION("DCCP - Datagram Congestion Controlled Protocol"); diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index cac53548c2d8..bbadd6681b83 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c @@ -1219,8 +1219,8 @@ module_exit(dccp_v6_exit); * values directly, Also cover the case where the protocol is not specified, * i.e. net-pf-PF_INET6-proto-0-type-SOCK_DCCP */ -MODULE_ALIAS("net-pf-" __stringify(PF_INET6) "-proto-33-type-6"); -MODULE_ALIAS("net-pf-" __stringify(PF_INET6) "-proto-0-type-6"); +MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 33, 6); +MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 0, 6); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@mandriva.com>"); MODULE_DESCRIPTION("DCCPv6 - Datagram Congestion Controlled Protocol"); diff --git a/net/ieee80211/ieee80211_crypt_tkip.c b/net/ieee80211/ieee80211_crypt_tkip.c index 72e6ab66834f..811777682e2b 100644 --- a/net/ieee80211/ieee80211_crypt_tkip.c +++ b/net/ieee80211/ieee80211_crypt_tkip.c @@ -14,6 +14,7 @@ #include <linux/init.h> #include <linux/slab.h> #include <linux/random.h> +#include <linux/scatterlist.h> #include <linux/skbuff.h> #include <linux/netdevice.h> #include <linux/mm.h> @@ -390,9 +391,7 @@ static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv) icv[3] = crc >> 24; crypto_blkcipher_setkey(tkey->tx_tfm_arc4, rc4key, 16); - sg.page = virt_to_page(pos); - sg.offset = offset_in_page(pos); - sg.length = len + 4; + sg_init_one(&sg, pos, len + 4); return crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4); } @@ -485,9 +484,7 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv) plen = skb->len - hdr_len - 12; crypto_blkcipher_setkey(tkey->rx_tfm_arc4, rc4key, 16); - sg.page = virt_to_page(pos); - sg.offset = offset_in_page(pos); - sg.length = plen + 4; + sg_init_one(&sg, pos, plen + 4); if (crypto_blkcipher_decrypt(&desc, &sg, &sg, plen + 4)) { if (net_ratelimit()) { printk(KERN_DEBUG ": TKIP: failed to decrypt " @@ -539,11 +536,12 @@ static int michael_mic(struct crypto_hash *tfm_michael, u8 * key, u8 * hdr, printk(KERN_WARNING "michael_mic: tfm_michael == NULL\n"); return -1; } - sg[0].page = virt_to_page(hdr); + sg_init_table(sg, 2); + sg_set_page(&sg[0], virt_to_page(hdr)); sg[0].offset = offset_in_page(hdr); sg[0].length = 16; - sg[1].page = virt_to_page(data); + sg_set_page(&sg[1], virt_to_page(data)); sg[1].offset = offset_in_page(data); sg[1].length = data_len; diff --git a/net/ieee80211/ieee80211_crypt_wep.c b/net/ieee80211/ieee80211_crypt_wep.c index 8d182459344e..9693429489ed 100644 --- a/net/ieee80211/ieee80211_crypt_wep.c +++ 
b/net/ieee80211/ieee80211_crypt_wep.c @@ -14,6 +14,7 @@ #include <linux/init.h> #include <linux/slab.h> #include <linux/random.h> +#include <linux/scatterlist.h> #include <linux/skbuff.h> #include <linux/mm.h> #include <asm/string.h> @@ -170,9 +171,7 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv) icv[3] = crc >> 24; crypto_blkcipher_setkey(wep->tx_tfm, key, klen); - sg.page = virt_to_page(pos); - sg.offset = offset_in_page(pos); - sg.length = len + 4; + sg_init_one(&sg, pos, len + 4); return crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4); } @@ -212,9 +211,7 @@ static int prism2_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv) plen = skb->len - hdr_len - 8; crypto_blkcipher_setkey(wep->rx_tfm, key, klen); - sg.page = virt_to_page(pos); - sg.offset = offset_in_page(pos); - sg.length = plen + 4; + sg_init_one(&sg, pos, plen + 4); if (crypto_blkcipher_decrypt(&desc, &sg, &sg, plen + 4)) return -7; diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index 7eb83ebed2ec..dc429b6b0ba6 100644 --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c @@ -815,6 +815,12 @@ static int inet_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) nlmsg_len(nlh) < hdrlen) return -EINVAL; +#ifdef CONFIG_KMOD + if (inet_diag_table[nlh->nlmsg_type] == NULL) + request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK, + NETLINK_INET_DIAG, nlh->nlmsg_type); +#endif + if (inet_diag_table[nlh->nlmsg_type] == NULL) return -ENOENT; @@ -914,3 +920,4 @@ static void __exit inet_diag_exit(void) module_init(inet_diag_init); module_exit(inet_diag_exit); MODULE_LICENSE("GPL"); +MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_INET_DIAG); diff --git a/net/ipv4/tcp_diag.c b/net/ipv4/tcp_diag.c index 3904d2158a92..2fbcc7d1b1a0 100644 --- a/net/ipv4/tcp_diag.c +++ b/net/ipv4/tcp_diag.c @@ -56,3 +56,4 @@ static void __exit tcp_diag_exit(void) module_init(tcp_diag_init); module_exit(tcp_diag_exit); MODULE_LICENSE("GPL"); +MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_INET_DIAG, TCPDIAG_GETSOCK); diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c index 67cd06613a25..66a9139d46e9 100644 --- a/net/ipv6/ah6.c +++ b/net/ipv6/ah6.c @@ -483,6 +483,7 @@ static int ah6_init_state(struct xfrm_state *x) break; case XFRM_MODE_TUNNEL: x->props.header_len += sizeof(struct ipv6hdr); + break; default: goto error; } diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index b0715432e454..72a659806cad 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c @@ -360,6 +360,7 @@ static int esp6_init_state(struct xfrm_state *x) break; case XFRM_MODE_TUNNEL: x->props.header_len += sizeof(struct ipv6hdr); + break; default: goto error; } diff --git a/net/mac80211/wep.c b/net/mac80211/wep.c index 6675261e958f..a84a23310ff4 100644 --- a/net/mac80211/wep.c +++ b/net/mac80211/wep.c @@ -16,7 +16,7 @@ #include <linux/crypto.h> #include <linux/err.h> #include <linux/mm.h> -#include <asm/scatterlist.h> +#include <linux/scatterlist.h> #include <net/mac80211.h> #include "ieee80211_i.h" @@ -138,9 +138,7 @@ void ieee80211_wep_encrypt_data(struct crypto_blkcipher *tfm, u8 *rc4key, *icv = cpu_to_le32(~crc32_le(~0, data, data_len)); crypto_blkcipher_setkey(tfm, rc4key, klen); - sg.page = virt_to_page(data); - sg.offset = offset_in_page(data); - sg.length = data_len + WEP_ICV_LEN; + sg_init_one(&sg, data, data_len + WEP_ICV_LEN); crypto_blkcipher_encrypt(&desc, &sg, &sg, sg.length); } @@ -204,9 +202,7 @@ int ieee80211_wep_decrypt_data(struct crypto_blkcipher *tfm, u8 *rc4key, __le32 crc; crypto_blkcipher_setkey(tfm, rc4key, klen); - 
sg.page = virt_to_page(data); - sg.offset = offset_in_page(data); - sg.length = data_len + WEP_ICV_LEN; + sg_init_one(&sg, data, data_len + WEP_ICV_LEN); crypto_blkcipher_decrypt(&desc, &sg, &sg, sg.length); crc = cpu_to_le32(~crc32_le(~0, data, data_len)); diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c index be57cf317a7f..421281d9dd1d 100644 --- a/net/sched/sch_teql.c +++ b/net/sched/sch_teql.c @@ -266,7 +266,7 @@ static int teql_master_xmit(struct sk_buff *skb, struct net_device *dev) int busy; int nores; int len = skb->len; - int subq = skb->queue_mapping; + int subq = skb_get_queue_mapping(skb); struct sk_buff *skb_res = NULL; start = master->slaves; @@ -284,7 +284,7 @@ restart: if (slave->qdisc_sleeping != q) continue; if (netif_queue_stopped(slave) || - netif_subqueue_stopped(slave, subq) || + __netif_subqueue_stopped(slave, subq) || !netif_running(slave)) { busy = 1; continue; @@ -294,7 +294,7 @@ restart: case 0: if (netif_tx_trylock(slave)) { if (!netif_queue_stopped(slave) && - !netif_subqueue_stopped(slave, subq) && + !__netif_subqueue_stopped(slave, subq) && slave->hard_start_xmit(skb, slave) == 0) { netif_tx_unlock(slave); master->slaves = NEXT_SLAVE(q); diff --git a/net/sctp/auth.c b/net/sctp/auth.c index 781810724714..cbd64b216cce 100644 --- a/net/sctp/auth.c +++ b/net/sctp/auth.c @@ -726,7 +726,8 @@ void sctp_auth_calculate_hmac(const struct sctp_association *asoc, /* set up scatter list */ end = skb_tail_pointer(skb); - sg.page = virt_to_page(auth); + sg_init_table(&sg, 1); + sg_set_page(&sg, virt_to_page(auth)); sg.offset = (unsigned long)(auth) % PAGE_SIZE; sg.length = end - (unsigned char *)auth; diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index f983a369d4e2..658476c4d587 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -56,7 +56,7 @@ #include <linux/ipv6.h> #include <linux/net.h> #include <linux/inet.h> -#include <asm/scatterlist.h> +#include <linux/scatterlist.h> #include <linux/crypto.h> #include <net/sock.h> @@ -1513,7 +1513,8 @@ static sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep, struct hash_desc desc; /* Sign the message. */ - sg.page = virt_to_page(&cookie->c); + sg_init_table(&sg, 1); + sg_set_page(&sg, virt_to_page(&cookie->c)); sg.offset = (unsigned long)(&cookie->c) % PAGE_SIZE; sg.length = bodysize; keylen = SCTP_SECRET_SIZE; @@ -1585,7 +1586,8 @@ struct sctp_association *sctp_unpack_cookie( /* Check the signature. 
*/ keylen = SCTP_SECRET_SIZE; - sg.page = virt_to_page(bear_cookie); + sg_init_table(&sg, 1); + sg_set_page(&sg, virt_to_page(bear_cookie)); sg.offset = (unsigned long)(bear_cookie) % PAGE_SIZE; sg.length = bodysize; key = (char *)ep->secret_key[ep->current_key]; diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c index bfb6a29633dd..32be431affcf 100644 --- a/net/sunrpc/auth_gss/gss_krb5_crypto.c +++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c @@ -197,9 +197,9 @@ encryptor(struct scatterlist *sg, void *data) int i = (page_pos + outbuf->page_base) >> PAGE_CACHE_SHIFT; in_page = desc->pages[i]; } else { - in_page = sg->page; + in_page = sg_page(sg); } - desc->infrags[desc->fragno].page = in_page; + sg_set_page(&desc->infrags[desc->fragno], in_page); desc->fragno++; desc->fraglen += sg->length; desc->pos += sg->length; @@ -215,11 +215,11 @@ encryptor(struct scatterlist *sg, void *data) if (ret) return ret; if (fraglen) { - desc->outfrags[0].page = sg->page; + sg_set_page(&desc->outfrags[0], sg_page(sg)); desc->outfrags[0].offset = sg->offset + sg->length - fraglen; desc->outfrags[0].length = fraglen; desc->infrags[0] = desc->outfrags[0]; - desc->infrags[0].page = in_page; + sg_set_page(&desc->infrags[0], in_page); desc->fragno = 1; desc->fraglen = fraglen; } else { @@ -287,7 +287,7 @@ decryptor(struct scatterlist *sg, void *data) if (ret) return ret; if (fraglen) { - desc->frags[0].page = sg->page; + sg_set_page(&desc->frags[0], sg_page(sg)); desc->frags[0].offset = sg->offset + sg->length - fraglen; desc->frags[0].length = fraglen; desc->fragno = 1; diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c index 6a59180e1667..3d1f7cdf9dd0 100644 --- a/net/sunrpc/xdr.c +++ b/net/sunrpc/xdr.c @@ -1059,7 +1059,7 @@ xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len, do { if (thislen > page_len) thislen = page_len; - sg->page = buf->pages[i]; + sg_set_page(sg, buf->pages[i]); sg->offset = page_offset; sg->length = thislen; ret = actor(sg, data); diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c index 5ced62c19c63..313d4bed3aa9 100644 --- a/net/xfrm/xfrm_algo.c +++ b/net/xfrm/xfrm_algo.c @@ -13,6 +13,7 @@ #include <linux/kernel.h> #include <linux/pfkeyv2.h> #include <linux/crypto.h> +#include <linux/scatterlist.h> #include <net/xfrm.h> #if defined(CONFIG_INET_AH) || defined(CONFIG_INET_AH_MODULE) || defined(CONFIG_INET6_AH) || defined(CONFIG_INET6_AH_MODULE) #include <net/ah.h> @@ -552,7 +553,7 @@ int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *desc, if (copy > len) copy = len; - sg.page = virt_to_page(skb->data + offset); + sg_set_page(&sg, virt_to_page(skb->data + offset)); sg.offset = (unsigned long)(skb->data + offset) % PAGE_SIZE; sg.length = copy; @@ -577,7 +578,7 @@ int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *desc, if (copy > len) copy = len; - sg.page = frag->page; + sg_set_page(&sg, frag->page); sg.offset = frag->page_offset + offset-start; sg.length = copy; |
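
The rfcomm/core.c hunks above drop the hand-rolled daemonize()/terminate-flag worker in favour of the kthread API. The following is a minimal sketch of that lifecycle, not part of the patch itself; the example_* names are hypothetical, and only the kthread_run()/kthread_should_stop()/kthread_stop() calls already visible in the diff are assumed.

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>

static struct task_struct *example_thread;

/* Worker loop in the kthread style used by rfcomm_run() above: the loop
 * exits when kthread_stop() asks it to, so no terminate/running counters
 * are needed. */
static int example_run(void *unused)
{
        while (!kthread_should_stop()) {
                set_current_state(TASK_INTERRUPTIBLE);
                schedule();                     /* woken by new work or by kthread_stop() */
                set_current_state(TASK_RUNNING);
                /* process pending work here */
        }
        return 0;
}

static int example_start(void)
{
        example_thread = kthread_run(example_run, NULL, "kexampled");
        if (IS_ERR(example_thread))
                return PTR_ERR(example_thread);
        return 0;
}

static void example_stop(void)
{
        kthread_stop(example_thread);           /* blocks until example_run() returns */
}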
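
Several hunks (net/core/dev.c, netpoll.c, pktgen.c, net/sched/sch_teql.c) stop touching skb->queue_mapping directly and go through accessors, with netif_subqueue_stopped() now taking the skb itself. Below is a short sketch of that accessor-based style, using only the helper signatures visible in the diff; example_can_xmit() and example_pick_queue() are illustrative names, not functions from the patch.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Transmit-path check written against the new helpers: the subqueue test
 * takes the skb, so callers no longer read skb->queue_mapping themselves. */
static int example_can_xmit(struct net_device *dev, struct sk_buff *skb)
{
        if (netif_queue_stopped(dev) || netif_subqueue_stopped(dev, skb))
                return 0;
        return 1;
}

/* Selecting a transmit queue through the setter instead of assigning
 * skb->queue_mapping directly, as the pktgen and dev.c hunks do. */
static void example_pick_queue(struct sk_buff *skb, u16 queue)
{
        skb_set_queue_mapping(skb, queue);
}

Code that only needs to read the mapping (as in sch_teql.c above) uses skb_get_queue_mapping(skb) in the same way.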
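
The crypto-related hunks (net/core/skbuff.c, ieee80211_crypt_tkip/wep, sctp, sunrpc, xfrm_algo.c) all move from filling struct scatterlist fields by hand to the helper initialisers. A minimal sketch of both conversion patterns follows; it assumes the two-argument sg_set_page() form used throughout this patch, and the sg_one_buffer()/sg_two_buffers() wrappers are hypothetical helpers for illustration only.

#include <linux/mm.h>
#include <linux/scatterlist.h>

/* Single-entry list: sg_init_one() replaces the separate page/offset/length
 * assignments removed in the hunks above. */
static void sg_one_buffer(struct scatterlist *sg, void *buf, unsigned int len)
{
        sg_init_one(sg, buf, len);
}

/* Multi-entry list: initialise the table first, then set each page with
 * sg_set_page(); offset and length are still assigned directly, matching
 * the michael_mic() and skb_to_sgvec() hunks. */
static void sg_two_buffers(struct scatterlist sg[2],
                           void *hdr, unsigned int hdr_len,
                           void *data, unsigned int data_len)
{
        sg_init_table(sg, 2);

        sg_set_page(&sg[0], virt_to_page(hdr));
        sg[0].offset = offset_in_page(hdr);
        sg[0].length = hdr_len;

        sg_set_page(&sg[1], virt_to_page(data));
        sg[1].offset = offset_in_page(data);
        sg[1].length = data_len;
}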