Line 0
Link Here
|
|
|
1 |
/* |
2 |
BlueZ - Bluetooth protocol stack for Linux |
3 |
Copyright (C) 2000-2001 Qualcomm Incorporated |
4 |
|
5 |
Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com> |
6 |
|
7 |
This program is free software; you can redistribute it and/or modify |
8 |
it under the terms of the GNU General Public License version 2 as |
9 |
published by the Free Software Foundation; |
10 |
|
11 |
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS |
12 |
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
13 |
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. |
14 |
IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY |
15 |
CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES |
16 |
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN |
17 |
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF |
18 |
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. |
19 |
|
20 |
ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, |
21 |
COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS |
22 |
SOFTWARE IS DISCLAIMED. |
23 |
*/ |
24 |
|
25 |
#ifndef __HCI_CORE_H |
26 |
#define __HCI_CORE_H |
27 |
|
28 |
#include <linux/hrtimer.h> |
29 |
|
30 |
#include <net/bluetooth/hci.h> |
31 |
|
32 |
/* HCI upper protocols */ |
33 |
#define HCI_PROTO_L2CAP 0 |
34 |
#define HCI_PROTO_SCO 1 |
35 |
|
36 |
/* HCI Core structures */

/* One parsed inquiry (device discovery) response from a remote device. */
struct inquiry_data {
        bdaddr_t bdaddr;                /* address of the responding device */
        __u8 pscan_rep_mode;            /* page scan repetition mode */
        __u8 pscan_period_mode;         /* page scan period mode */
        __u8 pscan_mode;                /* page scan mode */
        __u8 dev_class[3];              /* class of device */
        __le16 clock_offset;            /* clock offset, little endian on the wire */
        __s8 rssi;                      /* signal strength of the response */
};

/* Node of the singly linked inquiry cache list. */
struct inquiry_entry {
        struct inquiry_entry *next;     /* next cached result, NULL-terminated */
        __u32 timestamp;                /* jiffies when entry was last updated */
        struct inquiry_data data;       /* the cached inquiry response */
};

/* Cache of recent inquiry results, one per hci_dev (see hdev->inq_cache). */
struct inquiry_cache {
        spinlock_t lock;                /* protects list; see inquiry_cache_lock*() */
        __u32 timestamp;                /* jiffies of the last cache update */
        struct inquiry_entry *list;     /* head of cached entries, NULL when empty */
};

/* Set of active connections of a device plus per-type counters. */
struct hci_conn_hash {
        struct list_head list;          /* list of struct hci_conn, linked via conn->list */
        spinlock_t lock;                /* protects the list */
        unsigned int acl_num;           /* number of ACL connections in the list */
        unsigned int sco_num;           /* number of non-ACL (SCO) connections */
};
65 |
|
66 |
/*
 * Per-controller state for one HCI device (a Bluetooth adapter).
 * Lifetime is reference counted: hci_dev_hold()/hci_dev_put() pair with
 * the destruct() callback below.
 */
struct hci_dev {
        struct list_head list;          /* node in the global hci_dev_list */
        spinlock_t lock;                /* protects device state; see hci_dev_lock*() */
        atomic_t refcnt;                /* dropped to zero -> destruct() is called */

        char name[8];                   /* device name */
        unsigned long flags;            /* HCI_UP, HCI_INIT, ... bit flags */
        __u16 id;                       /* device index */
        __u8 type;                      /* transport/bus type */
        bdaddr_t bdaddr;                /* local Bluetooth address */
        __u8 features[8];               /* LMP feature mask; see lmp_*_capable() */
        __u8 hci_ver;                   /* HCI version of the controller */
        __u16 hci_rev;                  /* HCI revision of the controller */
        __u16 manufacturer;             /* manufacturer id */
        __u16 voice_setting;            /* SCO voice setting */

        __u16 pkt_type;                 /* supported/allowed packet types */
        __u16 link_policy;              /* default link policy */
        __u16 link_mode;                /* default link mode */

        __u32 idle_timeout;             /* ACL idle timeout before sniff, see idle_timer */
        __u16 sniff_min_interval;       /* sniff mode minimum interval */
        __u16 sniff_max_interval;       /* sniff mode maximum interval */

        unsigned long quirks;           /* driver quirk flags */

        atomic_t cmd_cnt;               /* command flow-control credits */
        /* Number of available controller buffers for ACL packets */
        unsigned int acl_cnt;
        /* Number of available controller buffers for SCO packets */
        /* NOTE(review): sco_cnt is atomic_t while acl_cnt is a plain int;
         * presumably SCO credits are updated outside the tx tasklet -- confirm. */
        atomic_t sco_cnt;

        /* Maximum transmission unit for ACL packets */
        unsigned int acl_mtu;
        /* Maximum transmission unit for SCO packets */
        unsigned int sco_mtu;
        /* Maximum number of ACL packets the controller is able to buffer */
        unsigned int acl_pkts;
        /* Maximum number of SCO packets the controller is able to buffer */
        unsigned int sco_pkts;

        unsigned long cmd_last_tx;      /* jiffies of last command transmit */
        unsigned long acl_last_tx;      /* jiffies of last ACL transmit */
        unsigned long sco_last_tx;      /* jiffies of last SCO transmit */

        struct tasklet_struct cmd_task; /* scheduled by hci_sched_cmd() */
        struct tasklet_struct rx_task;  /* scheduled by hci_sched_rx() */
        struct tasklet_struct tx_task;  /* scheduled by hci_sched_tx() */

        struct sk_buff_head rx_q;       /* received frames; fed by hci_recv_frame() */
        struct sk_buff_head raw_q;      /* raw frames queued for transmit */
        struct sk_buff_head cmd_q;      /* HCI commands queued for transmit */

        struct sk_buff *sent_cmd;       /* last sent command; see hci_sent_cmd_data() */

        struct semaphore req_lock;      /* serializes requests; see hci_req_lock() */
        wait_queue_head_t req_wait_q;   /* waiters for request completion */
        __u32 req_status;               /* HCI_REQ_DONE/PEND/CANCELED */
        __u32 req_result;               /* result of the completed request */

        struct inquiry_cache inq_cache; /* recent inquiry results */
        struct hci_conn_hash conn_hash; /* active connections */

        struct hci_dev_stats stat;      /* byte/packet/error counters */

        struct sk_buff_head driver_init; /* frames the driver sends at init time */

        void *driver_data;              /* owned by the transport driver */
        void *core_data;                /* owned by the HCI core */

        atomic_t promisc;               /* promiscuous-mode reference count */

        struct device *parent;          /* set via SET_HCIDEV_DEV() */
        struct device dev;              /* embedded sysfs device */

        struct module *owner;           /* module pinned by hci_dev_hold() */

        int (*open)(struct hci_dev *hdev);      /* bring transport up */
        int (*close)(struct hci_dev *hdev);     /* shut transport down */
        int (*flush)(struct hci_dev *hdev);     /* drop pending frames */
        int (*send)(struct sk_buff *skb);       /* transmit one frame */
        void (*destruct)(struct hci_dev *hdev); /* called when refcnt hits zero */
        void (*notify)(struct hci_dev *hdev, unsigned int evt);
        int (*ioctl)(struct hci_dev *hdev, unsigned int cmd, unsigned long arg);
};
151 |
|
152 |
/*
 * One ACL or SCO connection to a remote device.  Reference counted via
 * hci_conn_hold()/hci_conn_put(); dropping the last reference arms
 * disc_timer rather than tearing the link down immediately.
 */
struct hci_conn {
        struct list_head list;          /* node in hdev->conn_hash.list */

        atomic_t refcnt;                /* see hci_conn_hold()/hci_conn_put() */

        bdaddr_t dst;                   /* remote device address */
        __u16 handle;                   /* connection handle assigned by controller */
        __u16 state;                    /* BT_CONNECTED etc. */
        __u8 mode;                      /* current link mode (active/sniff) */
        __u8 type;                      /* ACL_LINK or SCO link type */
        __u8 out;                       /* nonzero for outgoing connections */
        __u8 attempt;                   /* connection attempt counter */
        __u8 dev_class[3];              /* class of the remote device */
        __u8 features[8];               /* LMP features of the remote device */
        __u16 interval;                 /* link interval */
        __u16 link_policy;              /* link policy for this connection */
        __u32 link_mode;                /* link mode bits (auth/encrypt/...) */
        __u8 power_save;                /* nonzero when sniff mode is allowed */
        unsigned long pend;             /* pending-request bits, HCI_CONN_*_PEND */

        struct timer_list disc_timer;   /* delayed disconnect, armed by hci_conn_put() */
        struct timer_list idle_timer;   /* ACL idle timeout (see hdev->idle_timeout) */

        struct work_struct work;        /* deferred work for this connection */

        struct device dev;              /* embedded sysfs device */

        struct hci_dev *hdev;           /* owning device */
        void *l2cap_data;               /* owned by the L2CAP layer */
        void *sco_data;                 /* owned by the SCO layer */
        void *priv;                     /* opaque private data */

        /* presumably links a SCO connection to its parent ACL -- confirm */
        struct hci_conn *link;
};

/* Registered upper protocols, indexed by HCI_PROTO_L2CAP/HCI_PROTO_SCO. */
extern struct hci_proto *hci_proto[];
extern struct list_head hci_dev_list;
extern struct list_head hci_cb_list;
extern rwlock_t hci_dev_list_lock;
extern rwlock_t hci_cb_list_lock;
192 |
|
193 |
/* ----- Inquiry cache ----- */
#define INQUIRY_CACHE_AGE_MAX   (HZ*30) /* 30 seconds */
#define INQUIRY_ENTRY_AGE_MAX   (HZ*60) /* 60 seconds */

/*
 * Locking helpers for the per-device inquiry cache.  The macro argument
 * is parenthesized so that any pointer expression expands safely.
 */
#define inquiry_cache_lock(c)           spin_lock(&(c)->lock)
#define inquiry_cache_unlock(c)         spin_unlock(&(c)->lock)
#define inquiry_cache_lock_bh(c)        spin_lock_bh(&(c)->lock)
#define inquiry_cache_unlock_bh(c)      spin_unlock_bh(&(c)->lock)
201 |
|
202 |
static inline void inquiry_cache_init(struct hci_dev *hdev) |
203 |
{ |
204 |
struct inquiry_cache *c = &hdev->inq_cache; |
205 |
spin_lock_init(&c->lock); |
206 |
c->list = NULL; |
207 |
} |
208 |
|
209 |
static inline int inquiry_cache_empty(struct hci_dev *hdev) |
210 |
{ |
211 |
struct inquiry_cache *c = &hdev->inq_cache; |
212 |
return (c->list == NULL); |
213 |
} |
214 |
|
215 |
static inline long inquiry_cache_age(struct hci_dev *hdev) |
216 |
{ |
217 |
struct inquiry_cache *c = &hdev->inq_cache; |
218 |
return jiffies - c->timestamp; |
219 |
} |
220 |
|
221 |
static inline long inquiry_entry_age(struct inquiry_entry *e) |
222 |
{ |
223 |
return jiffies - e->timestamp; |
224 |
} |
225 |
|
226 |
struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr); |
227 |
void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data); |
228 |
|
229 |
/* ----- HCI Connections ----- */

/* Bit numbers for hci_conn->pend: operations requested from the
 * controller that have not yet completed. */
enum {
        HCI_CONN_AUTH_PEND,             /* authentication pending */
        HCI_CONN_ENCRYPT_PEND,          /* encryption change pending */
        HCI_CONN_RSWITCH_PEND,          /* role switch pending */
        HCI_CONN_MODE_CHANGE_PEND,      /* mode (sniff/active) change pending */
};
236 |
|
237 |
/* Initialize the connection list and counters of @hdev. */
static inline void hci_conn_hash_init(struct hci_dev *hdev)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        INIT_LIST_HEAD(&h->list);
        spin_lock_init(&h->lock);
        h->acl_num = 0;
        h->sco_num = 0;
}

/* Add connection @c to the list of @hdev and bump the per-type counter.
 * Every non-ACL connection is counted as SCO.
 * NOTE(review): h->lock is not taken here; callers presumably provide
 * the required exclusion -- confirm. */
static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        list_add(&c->list, &h->list);
        if (c->type == ACL_LINK)
                h->acl_num++;
        else
                h->sco_num++;
}

/* Remove connection @c from the list of @hdev and drop the counter
 * bumped by hci_conn_hash_add(). */
static inline void hci_conn_hash_del(struct hci_dev *hdev, struct hci_conn *c)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        list_del(&c->list);
        if (c->type == ACL_LINK)
                h->acl_num--;
        else
                h->sco_num--;
}
265 |
|
266 |
static inline struct hci_conn *hci_conn_hash_lookup_handle(struct hci_dev *hdev, |
267 |
__u16 handle) |
268 |
{ |
269 |
struct hci_conn_hash *h = &hdev->conn_hash; |
270 |
struct list_head *p; |
271 |
struct hci_conn *c; |
272 |
|
273 |
list_for_each(p, &h->list) { |
274 |
c = list_entry(p, struct hci_conn, list); |
275 |
if (c->handle == handle) |
276 |
return c; |
277 |
} |
278 |
return NULL; |
279 |
} |
280 |
|
281 |
static inline struct hci_conn *hci_conn_hash_lookup_ba(struct hci_dev *hdev, |
282 |
__u8 type, bdaddr_t *ba) |
283 |
{ |
284 |
struct hci_conn_hash *h = &hdev->conn_hash; |
285 |
struct list_head *p; |
286 |
struct hci_conn *c; |
287 |
|
288 |
list_for_each(p, &h->list) { |
289 |
c = list_entry(p, struct hci_conn, list); |
290 |
if (c->type == type && !bacmp(&c->dst, ba)) |
291 |
return c; |
292 |
} |
293 |
return NULL; |
294 |
} |
295 |
|
296 |
static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev, |
297 |
__u8 type, __u16 state) |
298 |
{ |
299 |
struct hci_conn_hash *h = &hdev->conn_hash; |
300 |
struct list_head *p; |
301 |
struct hci_conn *c; |
302 |
|
303 |
list_for_each(p, &h->list) { |
304 |
c = list_entry(p, struct hci_conn, list); |
305 |
if (c->type == type && c->state == state) |
306 |
return c; |
307 |
} |
308 |
return NULL; |
309 |
} |
310 |
|
311 |
void hci_acl_connect(struct hci_conn *conn); |
312 |
void hci_acl_disconn(struct hci_conn *conn, __u8 reason); |
313 |
void hci_add_sco(struct hci_conn *conn, __u16 handle); |
314 |
|
315 |
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst); |
316 |
int hci_conn_del(struct hci_conn *conn); |
317 |
void hci_conn_hash_flush(struct hci_dev *hdev); |
318 |
|
319 |
struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *src); |
320 |
int hci_conn_auth(struct hci_conn *conn); |
321 |
int hci_conn_encrypt(struct hci_conn *conn); |
322 |
int hci_conn_change_link_key(struct hci_conn *conn); |
323 |
int hci_conn_switch_role(struct hci_conn *conn, uint8_t role); |
324 |
|
325 |
void hci_conn_enter_active_mode(struct hci_conn *conn); |
326 |
void hci_conn_enter_sniff_mode(struct hci_conn *conn); |
327 |
|
328 |
/* Take a reference on @conn and cancel any pending delayed disconnect. */
static inline void hci_conn_hold(struct hci_conn *conn)
{
        atomic_inc(&conn->refcnt);
        del_timer(&conn->disc_timer);
}

/*
 * Drop a reference on @conn.  When the last reference goes away the
 * connection is not torn down immediately; instead disc_timer is armed
 * so a quick re-hold can cancel the disconnect:
 *  - connected ACL links wait HCI_DISCONN_TIMEOUT ms, doubled for
 *    incoming (!conn->out) links;
 *  - everything else (SCO, or ACL not yet connected) waits 10 ms.
 */
static inline void hci_conn_put(struct hci_conn *conn)
{
        if (atomic_dec_and_test(&conn->refcnt)) {
                unsigned long timeo;
                if (conn->type == ACL_LINK) {
                        /* the idle timer is only meaningful while in use */
                        del_timer(&conn->idle_timer);
                        if (conn->state == BT_CONNECTED) {
                                timeo = msecs_to_jiffies(HCI_DISCONN_TIMEOUT);
                                if (!conn->out)
                                        timeo *= 2;
                        } else
                                timeo = msecs_to_jiffies(10);
                } else
                        timeo = msecs_to_jiffies(10);
                mod_timer(&conn->disc_timer, jiffies + timeo);
        }
}
351 |
|
352 |
/* ----- HCI tasks ----- */

/* Kick the command transmit tasklet of @hdev. */
static inline void hci_sched_cmd(struct hci_dev *hdev)
{
        tasklet_schedule(&hdev->cmd_task);
}

/* Kick the receive tasklet of @hdev; see hci_recv_frame(). */
static inline void hci_sched_rx(struct hci_dev *hdev)
{
        tasklet_schedule(&hdev->rx_task);
}

/* Kick the data transmit tasklet of @hdev. */
static inline void hci_sched_tx(struct hci_dev *hdev)
{
        tasklet_schedule(&hdev->tx_task);
}
367 |
|
368 |
/* ----- HCI Devices ----- */

/* Drop a bare device reference; the last one invokes the driver's
 * destruct() callback.  Does NOT release the module reference. */
static inline void __hci_dev_put(struct hci_dev *d)
{
        if (atomic_dec_and_test(&d->refcnt))
                d->destruct(d);
}

/* Drop the device reference and the owning module reference taken by
 * hci_dev_hold(). */
static inline void hci_dev_put(struct hci_dev *d)
{
        __hci_dev_put(d);
        module_put(d->owner);
}

/* Take a bare device reference (no module pinning); returns @d. */
static inline struct hci_dev *__hci_dev_hold(struct hci_dev *d)
{
        atomic_inc(&d->refcnt);
        return d;
}

/* Pin the owning module and take a device reference.
 * Returns @d on success, NULL when the module is going away. */
static inline struct hci_dev *hci_dev_hold(struct hci_dev *d)
{
        if (try_module_get(d->owner))
                return __hci_dev_hold(d);
        return NULL;
}
393 |
|
394 |
#define hci_dev_lock(d) spin_lock(&d->lock) |
395 |
#define hci_dev_unlock(d) spin_unlock(&d->lock) |
396 |
#define hci_dev_lock_bh(d) spin_lock_bh(&d->lock) |
397 |
#define hci_dev_unlock_bh(d) spin_unlock_bh(&d->lock) |
398 |
|
399 |
struct hci_dev *hci_dev_get(int index); |
400 |
struct hci_dev *hci_get_route(bdaddr_t *src, bdaddr_t *dst); |
401 |
|
402 |
struct hci_dev *hci_alloc_dev(void); |
403 |
void hci_free_dev(struct hci_dev *hdev); |
404 |
int hci_register_dev(struct hci_dev *hdev); |
405 |
int hci_unregister_dev(struct hci_dev *hdev); |
406 |
int hci_suspend_dev(struct hci_dev *hdev); |
407 |
int hci_resume_dev(struct hci_dev *hdev); |
408 |
int hci_dev_open(__u16 dev); |
409 |
int hci_dev_close(__u16 dev); |
410 |
int hci_dev_reset(__u16 dev); |
411 |
int hci_dev_reset_stat(__u16 dev); |
412 |
int hci_dev_cmd(unsigned int cmd, void __user *arg); |
413 |
int hci_get_dev_list(void __user *arg); |
414 |
int hci_get_dev_info(void __user *arg); |
415 |
int hci_get_conn_list(void __user *arg); |
416 |
int hci_get_conn_info(struct hci_dev *hdev, void __user *arg); |
417 |
int hci_inquiry(void __user *arg); |
418 |
|
419 |
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb); |
420 |
|
421 |
/* Receive frame from HCI drivers.
 * Takes ownership of @skb: drops it (with -ENXIO) unless the device is
 * up or initializing, otherwise queues it for the rx tasklet. */
static inline int hci_recv_frame(struct sk_buff *skb)
{
        struct hci_dev *hdev = (struct hci_dev *) skb->dev;
        if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
                        && !test_bit(HCI_INIT, &hdev->flags))) {
                kfree_skb(skb);
                return -ENXIO;
        }

        /* Incoming skb */
        bt_cb(skb)->incoming = 1;

        /* Time stamp */
        __net_timestamp(skb);

        /* Queue frame for rx task */
        skb_queue_tail(&hdev->rx_q, skb);
        hci_sched_rx(hdev);
        return 0;
}

int hci_register_sysfs(struct hci_dev *hdev);
void hci_unregister_sysfs(struct hci_dev *hdev);
void hci_conn_add_sysfs(struct hci_conn *conn);
void hci_conn_del_sysfs(struct hci_conn *conn);

/* Record the parent (transport) device for sysfs registration. */
#define SET_HCIDEV_DEV(hdev, pdev) ((hdev)->parent = (pdev))

/* ----- LMP capabilities ----- */
/* Test bits of the local LMP feature mask (hdev->features). */
#define lmp_rswitch_capable(dev)   ((dev)->features[0] & LMP_RSWITCH)
#define lmp_encrypt_capable(dev)   ((dev)->features[0] & LMP_ENCRYPT)
#define lmp_sniff_capable(dev)     ((dev)->features[0] & LMP_SNIFF)
#define lmp_sniffsubr_capable(dev) ((dev)->features[5] & LMP_SNIFF_SUBR)
455 |
|
456 |
/* ----- HCI protocols ----- */

/*
 * Upper-layer protocol (L2CAP or SCO) registered with the HCI core via
 * hci_register_proto().  The core dispatches events to these callbacks
 * through the hci_proto_*() inlines below; any callback may be NULL.
 */
struct hci_proto {
        char *name;                     /* protocol name */
        unsigned int id;                /* HCI_PROTO_L2CAP or HCI_PROTO_SCO */
        unsigned long flags;            /* protocol flags */

        void *priv;                     /* protocol private data */

        /* incoming connection request; returns an accept mask */
        int (*connect_ind) (struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type);
        /* connection established (or failed, per @status) */
        int (*connect_cfm) (struct hci_conn *conn, __u8 status);
        /* connection torn down with @reason */
        int (*disconn_ind) (struct hci_conn *conn, __u8 reason);
        /* inbound ACL data with packet-boundary @flags */
        int (*recv_acldata) (struct hci_conn *conn, struct sk_buff *skb, __u16 flags);
        /* inbound SCO data */
        int (*recv_scodata) (struct hci_conn *conn, struct sk_buff *skb);
        /* authentication complete */
        int (*auth_cfm) (struct hci_conn *conn, __u8 status);
        /* encryption change complete */
        int (*encrypt_cfm) (struct hci_conn *conn, __u8 status);
};
472 |
|
473 |
/* Ask both registered protocols (L2CAP first, then SCO) whether an
 * incoming connection should be accepted; OR their answers together. */
static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type)
{
        register struct hci_proto *hp;
        int mask = 0;

        hp = hci_proto[HCI_PROTO_L2CAP];
        if (hp && hp->connect_ind)
                mask |= hp->connect_ind(hdev, bdaddr, type);

        hp = hci_proto[HCI_PROTO_SCO];
        if (hp && hp->connect_ind)
                mask |= hp->connect_ind(hdev, bdaddr, type);

        return mask;
}

/* Notify both protocols that a connection completed with @status. */
static inline void hci_proto_connect_cfm(struct hci_conn *conn, __u8 status)
{
        register struct hci_proto *hp;

        hp = hci_proto[HCI_PROTO_L2CAP];
        if (hp && hp->connect_cfm)
                hp->connect_cfm(conn, status);

        hp = hci_proto[HCI_PROTO_SCO];
        if (hp && hp->connect_cfm)
                hp->connect_cfm(conn, status);
}

/* Notify both protocols that a connection was torn down with @reason. */
static inline void hci_proto_disconn_ind(struct hci_conn *conn, __u8 reason)
{
        register struct hci_proto *hp;

        hp = hci_proto[HCI_PROTO_L2CAP];
        if (hp && hp->disconn_ind)
                hp->disconn_ind(conn, reason);

        hp = hci_proto[HCI_PROTO_SCO];
        if (hp && hp->disconn_ind)
                hp->disconn_ind(conn, reason);
}

/* Notify both protocols that authentication completed with @status. */
static inline void hci_proto_auth_cfm(struct hci_conn *conn, __u8 status)
{
        register struct hci_proto *hp;

        hp = hci_proto[HCI_PROTO_L2CAP];
        if (hp && hp->auth_cfm)
                hp->auth_cfm(conn, status);

        hp = hci_proto[HCI_PROTO_SCO];
        if (hp && hp->auth_cfm)
                hp->auth_cfm(conn, status);
}

/* Notify both protocols that an encryption change completed. */
static inline void hci_proto_encrypt_cfm(struct hci_conn *conn, __u8 status)
{
        register struct hci_proto *hp;

        hp = hci_proto[HCI_PROTO_L2CAP];
        if (hp && hp->encrypt_cfm)
                hp->encrypt_cfm(conn, status);

        hp = hci_proto[HCI_PROTO_SCO];
        if (hp && hp->encrypt_cfm)
                hp->encrypt_cfm(conn, status);
}

int hci_register_proto(struct hci_proto *hproto);
int hci_unregister_proto(struct hci_proto *hproto);
543 |
|
544 |
/* ----- HCI callbacks ----- */

/*
 * Passive event listener registered with hci_register_cb() and kept on
 * hci_cb_list.  Callbacks run under read_lock_bh(&hci_cb_list_lock);
 * any callback may be NULL.
 */
struct hci_cb {
        struct list_head list;          /* node in hci_cb_list */

        char *name;                     /* listener name */

        void (*auth_cfm) (struct hci_conn *conn, __u8 status);
        void (*encrypt_cfm) (struct hci_conn *conn, __u8 status, __u8 encrypt);
        void (*key_change_cfm) (struct hci_conn *conn, __u8 status);
        void (*role_switch_cfm) (struct hci_conn *conn, __u8 status, __u8 role);
};
555 |
|
556 |
/* Fan out an authentication-complete event: first to the registered
 * protocols, then to every hci_cb listener. */
static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status)
{
        struct list_head *p;

        hci_proto_auth_cfm(conn, status);

        read_lock_bh(&hci_cb_list_lock);
        list_for_each(p, &hci_cb_list) {
                struct hci_cb *cb = list_entry(p, struct hci_cb, list);
                if (cb->auth_cfm)
                        cb->auth_cfm(conn, status);
        }
        read_unlock_bh(&hci_cb_list_lock);
}

/* Fan out an encryption-change event to protocols and listeners.
 * The protocol callback does not receive @encrypt; listeners do. */
static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt)
{
        struct list_head *p;

        hci_proto_encrypt_cfm(conn, status);

        read_lock_bh(&hci_cb_list_lock);
        list_for_each(p, &hci_cb_list) {
                struct hci_cb *cb = list_entry(p, struct hci_cb, list);
                if (cb->encrypt_cfm)
                        cb->encrypt_cfm(conn, status, encrypt);
        }
        read_unlock_bh(&hci_cb_list_lock);
}

/* Fan out a link-key-change event to every hci_cb listener. */
static inline void hci_key_change_cfm(struct hci_conn *conn, __u8 status)
{
        struct list_head *p;

        read_lock_bh(&hci_cb_list_lock);
        list_for_each(p, &hci_cb_list) {
                struct hci_cb *cb = list_entry(p, struct hci_cb, list);
                if (cb->key_change_cfm)
                        cb->key_change_cfm(conn, status);
        }
        read_unlock_bh(&hci_cb_list_lock);
}

/* Fan out a role-switch event to every hci_cb listener. */
static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status, __u8 role)
{
        struct list_head *p;

        read_lock_bh(&hci_cb_list_lock);
        list_for_each(p, &hci_cb_list) {
                struct hci_cb *cb = list_entry(p, struct hci_cb, list);
                if (cb->role_switch_cfm)
                        cb->role_switch_cfm(conn, status, role);
        }
        read_unlock_bh(&hci_cb_list_lock);
}

int hci_register_cb(struct hci_cb *hcb);
int hci_unregister_cb(struct hci_cb *hcb);

int hci_register_notifier(struct notifier_block *nb);
int hci_unregister_notifier(struct notifier_block *nb);

int hci_send_cmd(struct hci_dev *hdev, __u16 ogf, __u16 ocf, __u32 plen, void *param);
int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags);
int hci_send_sco(struct hci_conn *conn,
                 struct sk_buff *skb,
                 int sndbufsize,
                 void (*send_complete_cb)(struct hci_conn *conn));

void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 ogf, __u16 ocf);

void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data);

/* ----- HCI Sockets ----- */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb);
631 |
|
632 |
/* HCI info for socket: cast a struct sock to its containing hci_pinfo.
 * The argument is parenthesized so any pointer expression expands safely. */
#define hci_pi(sk) ((struct hci_pinfo *) (sk))
634 |
|
635 |
/* Per-socket state for HCI raw sockets; bt must stay first so the
 * hci_pi() cast from struct sock is valid. */
struct hci_pinfo {
        struct bt_sock bt;              /* common Bluetooth socket state; must be first */
        struct hci_dev *hdev;           /* device this socket is bound to */
        struct hci_filter filter;       /* per-socket event/packet filter */
        __u32 cmsg_mask;                /* ancillary data the socket wants */
};

/* HCI security filter */
#define HCI_SFLT_MAX_OGF 5

/* Filter describing which packet types, events and commands an
 * unprivileged socket may see/send. */
struct hci_sec_filter {
        __u32 type_mask;                        /* allowed packet types */
        __u32 event_mask[2];                    /* allowed events, 64 bits */
        __u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4]; /* allowed OCFs per OGF */
};
650 |
|
651 |
/* ----- HCI requests ----- */
/* Values of hdev->req_status for synchronous request processing. */
#define HCI_REQ_DONE      0
#define HCI_REQ_PEND      1
#define HCI_REQ_CANCELED  2

/*
 * Serialize synchronous HCI requests on a device (hdev->req_lock).
 * The macro argument is parenthesized so any pointer expression
 * expands safely.
 */
#define hci_req_lock(d)         down(&(d)->req_lock)
#define hci_req_unlock(d)       up(&(d)->req_lock)

void hci_req_complete(struct hci_dev *hdev, int result);
660 |
|
661 |
#endif /* __HCI_CORE_H */ |