author     Linus Torvalds <torvalds@linux-foundation.org>  2013-11-13 17:40:34 +0900
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-11-13 17:40:34 +0900
commit     42a2d923cc349583ebf6fdd52a7d35e1c2f7e6bd (patch)
tree       2b2b0c03b5389c1301800119333967efafd994ca /drivers/net/wireless/ath/dfs_pri_detector.c
parent     5cbb3d216e2041700231bcfc383ee5f8b7fc8b74 (diff)
parent     75ecab1df14d90e86cebef9ec5c76befde46e65f (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:

 1) The addition of nftables. No longer will we need protocol aware
    firewall filtering modules, it can all live in userspace.

    At the core of nftables is a, for lack of a better term, virtual
    machine that executes byte codes to inspect packet or metadata
    (arriving interface index, etc.) and make verdict decisions.

    Besides support for loading packet contents and comparing them, the
    interpreter supports lookups in various datastructures as
    fundamental operations. For example, sets are supported, so one
    could create a set of whitelist IP address entries which have
    ACCEPT verdicts attached to them, and use the appropriate byte
    codes to do such lookups.

    Since the interpreted code is composed in userspace, userspace can
    do things like optimize it before giving it to the kernel.

    Another major improvement is the capability of atomically updating
    portions of the ruleset. In the existing netfilter implementation,
    one has to update the entire rule set in order to make a change,
    which is very expensive.

    Userspace tools exist to create nftables rules from existing
    netfilter rule sets, but both kernel implementations will need to
    co-exist for quite some time as we transition from the old to the
    new stuff.

    Kudos to Patrick McHardy, Pablo Neira Ayuso, and others who have
    worked so hard on this.

 2) Daniel Borkmann and Hannes Frederic Sowa made several improvements
    to our pseudo-random number generator, mostly used for things like
    UDP port randomization and netfilter, amongst other things. In
    particular the taus88 generator is upgraded to taus113, and test
    cases are added.

 3) Support 64-bit rates in HTB and TBF schedulers, from Eric Dumazet
    and Yang Yingliang.

 4) Add support for new 577xx tigon3 chips to the tg3 driver, from
    Nithin Sujir.

 5) Fix two fatal flaws in TCP dynamic right sizing, from Eric Dumazet,
    Neal Cardwell, and Yuchung Cheng.

 6) Allow IP_TOS and IP_TTL to be specified in sendmsg() ancillary
    control message data, much like other socket option attributes.
    From Francesco Fusco. (See the userspace sketch after the commit
    list below.)

 7) Allow applications to specify a cap on the rate computed
    automatically by the kernel for pacing flows, via a new
    SO_MAX_PACING_RATE socket option. From Eric Dumazet. (Also shown
    in the sketch below.)

 8) Make the initial autotuned send buffer sizing in TCP more closely
    reflect actual needs, from Eric Dumazet.

 9) Currently early socket demux only happens for TCP sockets, but we
    can do it for connected UDP sockets too. Implementation from Shawn
    Bohrer.

10) Refactor inet socket demux with the goal of improving hash demux
    performance for listening sockets, the main goals being the ability
    to use RCU lookups on even request sockets and the elimination of
    listening-lock contention. From Eric Dumazet.

11) The bonding layer has many demuxes in its fast path, and an RCU
    conversion was started back in 3.11; several changes here extend
    the RCU usage to even more locations. From Ding Tianhong and Wang
    Yufen, based upon suggestions by Nikolay Aleksandrov and Veaceslav
    Falico.

12) Allow stacking of segmentation offloads to, in particular, allow
    segmentation offloading over tunnels. From Eric Dumazet.

13) Significantly improve the handling of the secret keys we feed into
    the various hash functions in the inet hashtables, TCP fast open,
    as well as syncookies. From Hannes Frederic Sowa. The key
    fundamental operation is "net_get_random_once()", which uses static
    keys. Hannes even extended this to ipv4/ipv6 fragmentation handling
    and our generic flow dissector.
14) The generic driver layer now takes care to set the driver data to
    NULL on device removal, so it is no longer necessary for drivers to
    set it to NULL explicitly. Many drivers have been cleaned up in
    this way, from Jingoo Han.

15) Add a BPF based packet scheduler classifier, from Daniel Borkmann.

16) Improve CRC32 interfaces and generic SKB checksum iterators so that
    SCTP's checksumming can be handled more cleanly. Also from Daniel
    Borkmann.

17) Add a new PMTU discovery mode, IP_PMTUDISC_INTERFACE, which forces
    using the interface MTU value. This helps avoid PMTU attacks,
    particularly on DNS servers. From Hannes Frederic Sowa.

18) Use generic XPS for transmit queue steering rather than an internal
    (re-)implementation in virtio-net. From Jason Wang.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1622 commits)
  random32: add test cases for taus113 implementation
  random32: upgrade taus88 generator to taus113 from errata paper
  random32: move rnd_state to linux/random.h
  random32: add prandom_reseed_late() and call when nonblocking pool becomes initialized
  random32: add periodic reseeding
  random32: fix off-by-one in seeding requirement
  PHY: Add RTL8201CP phy_driver to realtek
  xtsonic: add missing platform_set_drvdata() in xtsonic_probe()
  macmace: add missing platform_set_drvdata() in mace_probe()
  ethernet/arc/arc_emac: add missing platform_set_drvdata() in arc_emac_probe()
  ipv6: protect for_each_sk_fl_rcu in mem_check with rcu_read_lock_bh
  vlan: Implement vlan_dev_get_egress_qos_mask as an inline.
  ixgbe: add warning when max_vfs is out of range.
  igb: Update link modes display in ethtool
  netfilter: push reasm skb through instead of original frag skbs
  ip6_output: fragment outgoing reassembled skb properly
  MAINTAINERS: mv643xx_eth: take over maintainership from Lennart
  net_sched: tbf: support of 64bit rates
  ixgbe: deleting dfwd stations out of order can cause null ptr deref
  ixgbe: fix build err, num_rx_queues is only available with CONFIG_RPS
  ...
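Items 6 and 7 above are plain socket options and are easiest to see from
userspace. The following is a minimal, untested sketch (not part of the
pull itself): it caps a socket's pacing rate with SO_MAX_PACING_RATE and
attaches IP_TOS to a single sendmsg() via ancillary data. The rate and
TOS values are arbitrary, error handling is omitted, and
SO_MAX_PACING_RATE assumes libc headers recent enough to define the new
option.

#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>

static void send_paced_with_tos(int fd, const void *buf, size_t len,
				const struct sockaddr_in *dst)
{
	/* Cap the kernel-computed pacing rate, in bytes per second. */
	unsigned int max_rate = 1000000;	/* ~1 MB/s, arbitrary */
	setsockopt(fd, SOL_SOCKET, SO_MAX_PACING_RATE,
		   &max_rate, sizeof(max_rate));

	/* Override TOS for this one datagram via a control message. */
	int tos = 0x10;				/* IPTOS_LOWDELAY */
	char cbuf[CMSG_SPACE(sizeof(tos))];
	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
	struct msghdr msg = {
		.msg_name	= (void *)dst,
		.msg_namelen	= sizeof(*dst),
		.msg_iov	= &iov,
		.msg_iovlen	= 1,
		.msg_control	= cbuf,
		.msg_controllen	= sizeof(cbuf),
	};
	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

	cmsg->cmsg_level = IPPROTO_IP;
	cmsg->cmsg_type  = IP_TOS;
	cmsg->cmsg_len   = CMSG_LEN(sizeof(tos));
	memcpy(CMSG_DATA(cmsg), &tos, sizeof(tos));

	sendmsg(fd, &msg, 0);
}

Note the asymmetry: SO_MAX_PACING_RATE sticks to the socket, while the
IP_TOS control message applies to that single packet only (IP_TTL works
the same way with cmsg_type IP_TTL).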
Diffstat (limited to 'drivers/net/wireless/ath/dfs_pri_detector.c')
-rw-r--r--  drivers/net/wireless/ath/dfs_pri_detector.c  |  429
1 file changed, 429 insertions(+), 0 deletions(-)
diff --git a/drivers/net/wireless/ath/dfs_pri_detector.c b/drivers/net/wireless/ath/dfs_pri_detector.c
new file mode 100644
index 000000000000..43b608178884
--- /dev/null
+++ b/drivers/net/wireless/ath/dfs_pri_detector.c
@@ -0,0 +1,429 @@
+/*
+ * Copyright (c) 2012 Neratec Solutions AG
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "ath.h"
+#include "dfs_pattern_detector.h"
+#include "dfs_pri_detector.h"
+
+struct ath_dfs_pool_stats global_dfs_pool_stats = {};
+
+#define DFS_POOL_STAT_INC(c) (global_dfs_pool_stats.c++)
+#define DFS_POOL_STAT_DEC(c) (global_dfs_pool_stats.c--)
+
+/**
+ * struct pulse_elem - element in pulse queue
+ * @head: list head for linking into the pulse queue or singleton pool
+ * @ts: time stamp in usecs
+ */
+struct pulse_elem {
+ struct list_head head;
+ u64 ts;
+};
+
+/**
+ * pde_get_multiple() - get number of multiples considering a given tolerance
+ * @val: value to be factored
+ * @fraction: base unit
+ * @tolerance: maximum acceptable deviation
+ * Return: factor if abs(val - factor*fraction) <= tolerance, 0 otherwise
+ *         (e.g. val = 9, fraction = 4, tolerance = 1 yields factor 2)
+ */
+static u32 pde_get_multiple(u32 val, u32 fraction, u32 tolerance)
+{
+ u32 remainder;
+ u32 factor;
+ u32 delta;
+
+ if (fraction == 0)
+ return 0;
+
+ delta = (val < fraction) ? (fraction - val) : (val - fraction);
+
+ if (delta <= tolerance)
+ /* val and fraction are within tolerance */
+ return 1;
+
+ factor = val / fraction;
+ remainder = val % fraction;
+ if (remainder > tolerance) {
+ /* no exact match */
+ if ((fraction - remainder) <= tolerance)
+ /* remainder is within tolerance */
+ factor++;
+ else
+ factor = 0;
+ }
+ return factor;
+}
+
+/**
+ * DOC: Singleton Pulse and Sequence Pools
+ *
+ * Instances of pri_sequence and pulse_elem are kept in singleton pools to
+ * reduce the number of dynamic allocations. They are shared between all
+ * instances and grow up to the peak number of simultaneously used objects.
+ *
+ * Memory is freed after all references to the pools are released.
+ */
+static u32 singleton_pool_references;
+static LIST_HEAD(pulse_pool);
+static LIST_HEAD(pseq_pool);
+static DEFINE_SPINLOCK(pool_lock);
+
+static void pool_register_ref(void)
+{
+ spin_lock_bh(&pool_lock);
+ singleton_pool_references++;
+ DFS_POOL_STAT_INC(pool_reference);
+ spin_unlock_bh(&pool_lock);
+}
+
+static void pool_deregister_ref(void)
+{
+ spin_lock_bh(&pool_lock);
+ singleton_pool_references--;
+ DFS_POOL_STAT_DEC(pool_reference);
+ if (singleton_pool_references == 0) {
+ /* free singleton pools with no references left */
+ struct pri_sequence *ps, *ps0;
+ struct pulse_elem *p, *p0;
+
+ list_for_each_entry_safe(p, p0, &pulse_pool, head) {
+ list_del(&p->head);
+ DFS_POOL_STAT_DEC(pulse_allocated);
+ kfree(p);
+ }
+ list_for_each_entry_safe(ps, ps0, &pseq_pool, head) {
+ list_del(&ps->head);
+ DFS_POOL_STAT_DEC(pseq_allocated);
+ kfree(ps);
+ }
+ }
+ spin_unlock_bh(&pool_lock);
+}
+
+static void pool_put_pulse_elem(struct pulse_elem *pe)
+{
+ spin_lock_bh(&pool_lock);
+ list_add(&pe->head, &pulse_pool);
+ DFS_POOL_STAT_DEC(pulse_used);
+ spin_unlock_bh(&pool_lock);
+}
+
+static void pool_put_pseq_elem(struct pri_sequence *pse)
+{
+ spin_lock_bh(&pool_lock);
+ list_add(&pse->head, &pseq_pool);
+ DFS_POOL_STAT_DEC(pseq_used);
+ spin_unlock_bh(&pool_lock);
+}
+
+static struct pri_sequence *pool_get_pseq_elem(void)
+{
+ struct pri_sequence *pse = NULL;
+ spin_lock_bh(&pool_lock);
+ if (!list_empty(&pseq_pool)) {
+ pse = list_first_entry(&pseq_pool, struct pri_sequence, head);
+ list_del(&pse->head);
+ DFS_POOL_STAT_INC(pseq_used);
+ }
+ spin_unlock_bh(&pool_lock);
+ return pse;
+}
+
+static struct pulse_elem *pool_get_pulse_elem(void)
+{
+ struct pulse_elem *pe = NULL;
+ spin_lock_bh(&pool_lock);
+ if (!list_empty(&pulse_pool)) {
+ pe = list_first_entry(&pulse_pool, struct pulse_elem, head);
+ list_del(&pe->head);
+ DFS_POOL_STAT_INC(pulse_used);
+ }
+ spin_unlock_bh(&pool_lock);
+ return pe;
+}
+
+static struct pulse_elem *pulse_queue_get_tail(struct pri_detector *pde)
+{
+ struct list_head *l = &pde->pulses;
+ if (list_empty(l))
+ return NULL;
+ return list_entry(l->prev, struct pulse_elem, head);
+}
+
+static bool pulse_queue_dequeue(struct pri_detector *pde)
+{
+ struct pulse_elem *p = pulse_queue_get_tail(pde);
+ if (p != NULL) {
+ list_del_init(&p->head);
+ pde->count--;
+ /* give it back to pool */
+ pool_put_pulse_elem(p);
+ }
+ return (pde->count > 0);
+}
+
+/* remove pulses older than window */
+static void pulse_queue_check_window(struct pri_detector *pde)
+{
+ u64 min_valid_ts;
+ struct pulse_elem *p;
+
+ /* there is no delta time with less than 2 pulses */
+ if (pde->count < 2)
+ return;
+
+ if (pde->last_ts <= pde->window_size)
+ return;
+
+ min_valid_ts = pde->last_ts - pde->window_size;
+ while ((p = pulse_queue_get_tail(pde)) != NULL) {
+ if (p->ts >= min_valid_ts)
+ return;
+ pulse_queue_dequeue(pde);
+ }
+}
+
+static bool pulse_queue_enqueue(struct pri_detector *pde, u64 ts)
+{
+ struct pulse_elem *p = pool_get_pulse_elem();
+ if (p == NULL) {
+ p = kmalloc(sizeof(*p), GFP_ATOMIC);
+ if (p == NULL) {
+ DFS_POOL_STAT_INC(pulse_alloc_error);
+ return false;
+ }
+ DFS_POOL_STAT_INC(pulse_allocated);
+ DFS_POOL_STAT_INC(pulse_used);
+ }
+ INIT_LIST_HEAD(&p->head);
+ p->ts = ts;
+ list_add(&p->head, &pde->pulses);
+ pde->count++;
+ pde->last_ts = ts;
+ pulse_queue_check_window(pde);
+ if (pde->count >= pde->max_count)
+ pulse_queue_dequeue(pde);
+ return true;
+}
+
+static bool pseq_handler_create_sequences(struct pri_detector *pde,
+ u64 ts, u32 min_count)
+{
+ struct pulse_elem *p;
+ list_for_each_entry(p, &pde->pulses, head) {
+ struct pri_sequence ps, *new_ps;
+ struct pulse_elem *p2;
+ u32 tmp_false_count;
+ u64 min_valid_ts;
+ u32 delta_ts = ts - p->ts;
+
+ if (delta_ts < pde->rs->pri_min)
+ /* ignore too small pri */
+ continue;
+
+ if (delta_ts > pde->rs->pri_max)
+ /* stop on too large pri (sorted list) */
+ break;
+
+ /* build a new sequence with new potential pri */
+ ps.count = 2;
+ ps.count_falses = 0;
+ ps.first_ts = p->ts;
+ ps.last_ts = ts;
+ ps.pri = ts - p->ts;
+ ps.dur = ps.pri * (pde->rs->ppb - 1)
+ + 2 * pde->rs->max_pri_tolerance;
+
+ p2 = p;
+ tmp_false_count = 0;
+ min_valid_ts = ts - ps.dur;
+ /* check which past pulses are candidates for new sequence */
+ list_for_each_entry_continue(p2, &pde->pulses, head) {
+ u32 factor;
+ if (p2->ts < min_valid_ts)
+ /* stop on crossing window border */
+ break;
+			/* check if pulse matches (multi-)PRI */
+ factor = pde_get_multiple(ps.last_ts - p2->ts, ps.pri,
+ pde->rs->max_pri_tolerance);
+ if (factor > 0) {
+ ps.count++;
+ ps.first_ts = p2->ts;
+ /*
+ * on match, add the intermediate falses
+ * and reset counter
+ */
+ ps.count_falses += tmp_false_count;
+ tmp_false_count = 0;
+ } else {
+ /* this is a potential false one */
+ tmp_false_count++;
+ }
+ }
+ if (ps.count < min_count)
+ /* did not reach minimum count, drop sequence */
+ continue;
+
+ /* this is a valid one, add it */
+ ps.deadline_ts = ps.first_ts + ps.dur;
+ new_ps = pool_get_pseq_elem();
+ if (new_ps == NULL) {
+ new_ps = kmalloc(sizeof(*new_ps), GFP_ATOMIC);
+ if (new_ps == NULL) {
+ DFS_POOL_STAT_INC(pseq_alloc_error);
+ return false;
+ }
+ DFS_POOL_STAT_INC(pseq_allocated);
+ DFS_POOL_STAT_INC(pseq_used);
+ }
+ memcpy(new_ps, &ps, sizeof(ps));
+ INIT_LIST_HEAD(&new_ps->head);
+ list_add(&new_ps->head, &pde->sequences);
+ }
+ return true;
+}
+
+/* check new ts and add to all matching existing sequences */
+static u32
+pseq_handler_add_to_existing_seqs(struct pri_detector *pde, u64 ts)
+{
+ u32 max_count = 0;
+ struct pri_sequence *ps, *ps2;
+ list_for_each_entry_safe(ps, ps2, &pde->sequences, head) {
+ u32 delta_ts;
+ u32 factor;
+
+ /* first ensure that sequence is within window */
+ if (ts > ps->deadline_ts) {
+ list_del_init(&ps->head);
+ pool_put_pseq_elem(ps);
+ continue;
+ }
+
+ delta_ts = ts - ps->last_ts;
+ factor = pde_get_multiple(delta_ts, ps->pri,
+ pde->rs->max_pri_tolerance);
+ if (factor > 0) {
+ ps->last_ts = ts;
+ ps->count++;
+
+ if (max_count < ps->count)
+ max_count = ps->count;
+ } else {
+ ps->count_falses++;
+ }
+ }
+ return max_count;
+}
+
+static struct pri_sequence *
+pseq_handler_check_detection(struct pri_detector *pde)
+{
+ struct pri_sequence *ps;
+
+ if (list_empty(&pde->sequences))
+ return NULL;
+
+ list_for_each_entry(ps, &pde->sequences, head) {
+		/*
+		 * we assume there is enough matching confidence if we
+		 * 1) have enough pulses
+		 * 2) have more matching than false pulses
+		 */
+ if ((ps->count >= pde->rs->ppb_thresh) &&
+ (ps->count * pde->rs->num_pri >= ps->count_falses))
+ return ps;
+ }
+ return NULL;
+}
+
+
+/* free pulse queue and sequences list and give objects back to pools */
+static void pri_detector_reset(struct pri_detector *pde, u64 ts)
+{
+ struct pri_sequence *ps, *ps0;
+ struct pulse_elem *p, *p0;
+ list_for_each_entry_safe(ps, ps0, &pde->sequences, head) {
+ list_del_init(&ps->head);
+ pool_put_pseq_elem(ps);
+ }
+ list_for_each_entry_safe(p, p0, &pde->pulses, head) {
+ list_del_init(&p->head);
+ pool_put_pulse_elem(p);
+ }
+ pde->count = 0;
+ pde->last_ts = ts;
+}
+
+static void pri_detector_exit(struct pri_detector *de)
+{
+ pri_detector_reset(de, 0);
+ pool_deregister_ref();
+ kfree(de);
+}
+
+static struct pri_sequence *pri_detector_add_pulse(struct pri_detector *de,
+ struct pulse_event *event)
+{
+ u32 max_updated_seq;
+ struct pri_sequence *ps;
+ u64 ts = event->ts;
+ const struct radar_detector_specs *rs = de->rs;
+
+ /* ignore pulses not within width range */
+ if ((rs->width_min > event->width) || (rs->width_max < event->width))
+ return NULL;
+
+ if ((ts - de->last_ts) < rs->max_pri_tolerance)
+ /* if delta to last pulse is too short, don't use this pulse */
+ return NULL;
+ de->last_ts = ts;
+
+ max_updated_seq = pseq_handler_add_to_existing_seqs(de, ts);
+
+ if (!pseq_handler_create_sequences(de, ts, max_updated_seq)) {
+ pri_detector_reset(de, ts);
+ return NULL;
+ }
+
+ ps = pseq_handler_check_detection(de);
+
+ if (ps == NULL)
+ pulse_queue_enqueue(de, ts);
+
+ return ps;
+}
+
+struct pri_detector *pri_detector_init(const struct radar_detector_specs *rs)
+{
+ struct pri_detector *de;
+
+ de = kzalloc(sizeof(*de), GFP_ATOMIC);
+ if (de == NULL)
+ return NULL;
+ de->exit = pri_detector_exit;
+ de->add_pulse = pri_detector_add_pulse;
+ de->reset = pri_detector_reset;
+
+ INIT_LIST_HEAD(&de->sequences);
+ INIT_LIST_HEAD(&de->pulses);
+ de->window_size = rs->pri_max * rs->ppb * rs->num_pri;
+ de->max_count = rs->ppb * 2;
+ de->rs = rs;
+
+ pool_register_ref();
+ return de;
+}
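
For context beyond the diff itself, here is a minimal caller-side sketch
of the interface this file implements. It is illustrative only: struct
radar_detector_specs, struct pulse_event, struct pri_detector, and
struct pri_sequence come from dfs_pattern_detector.h and
dfs_pri_detector.h, only the fields actually referenced above are shown,
and the spec numbers below are invented (the real radar-type tables live
elsewhere in the DFS pattern detector).

/* Invented example spec; real tables live in dfs_pattern_detector.c. */
static const struct radar_detector_specs demo_rs = {
	.width_min		= 1,	/* usecs */
	.width_max		= 2,
	.pri_min		= 1428,	/* usecs */
	.pri_max		= 1429,
	.num_pri		= 1,
	.ppb			= 18,	/* pulses per burst */
	.ppb_thresh		= 15,	/* pulses needed for detection */
	.max_pri_tolerance	= 16,
};

/* Feed n timestamp-ordered pulse events into a fresh detector and
 * report whether any PRI sequence crossed the detection threshold. */
static bool demo_feed(struct pulse_event *evts, int n)
{
	struct pri_detector *pd;
	bool detected = false;
	int i;

	pd = pri_detector_init(&demo_rs);
	if (pd == NULL)
		return false;

	for (i = 0; i < n; i++) {
		/* add_pulse() returns a matching sequence on detection */
		if (pd->add_pulse(pd, &evts[i]) != NULL) {
			detected = true;
			break;
		}
	}

	pd->exit(pd);	/* frees the detector, drops its pool reference */
	return detected;
}

This is roughly how the surrounding code drives it: the pattern detector
keeps one pri_detector per radar type (and channel), pushes every
hardware pulse report through add_pulse(), and treats a non-NULL
pri_sequence as a radar detection.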