Lines 619-626  int hci_dev_reset(__u16 dev)

 	if (hdev->flush)
 		hdev->flush(hdev);

 	atomic_set(&hdev->cmd_cnt, 1);
-	hdev->acl_cnt = 0; hdev->sco_cnt = 0;
+	atomic_set(&hdev->sco_cnt, 0);
+	hdev->acl_cnt = 0;

 	if (!test_bit(HCI_RAW, &hdev->flags))
 		ret = __hci_request(hdev, hci_reset_req, 0,
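Note on the counter change above: hci_dev_reset() now clears the SCO credit counter with atomic_set(), and later hunks update it with atomic_inc()/atomic_dec() from both the TX tasklet and the new SCO pacing timer callback. That only works if a companion header change (not included in these hunks) turns the per-device counter into an atomic_t, presumably along these lines:

	/* struct hci_dev in hci_core.h -- assumed companion change, not shown in this diff */
	-	unsigned int	sco_cnt;
	+	atomic_t	sco_cnt;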
|
Lines 1012-1018  static int hci_send_frame(struct sk_buff *skb)

 		hci_send_to_sock(hdev, skb);
 	}

 	/* Get rid of skb owner, prior to sending to the driver. */
 	skb_orphan(skb);

 	return hdev->send(skb);
Lines 1096-1102  int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)

 		/* Non fragmented */
 		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

-		skb_queue_tail(&conn->data_q, skb);
+		skb_queue_tail(&conn->out_q, skb);
 	} else {
 		/* Fragmented */
 		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
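The data_q to out_q rename here, together with the atomic sent counter and the conn->tx_timer hrtimer that appear in later hunks, implies a matching struct hci_conn change in hci_core.h that these hunks do not show; a rough, assumed sketch of it:

	/* struct hci_conn in hci_core.h -- assumed companion change, not shown in this diff */
	-	struct sk_buff_head	data_q;
	+	struct sk_buff_head	out_q;
	-	unsigned int		sent;
	+	atomic_t		sent;
	+	struct hrtimer		tx_timer;	/* SCO pacing timer used below */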
Lines 1104-1112  int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)

 		skb_shinfo(skb)->frag_list = NULL;

 		/* Queue all fragments atomically */
-		spin_lock_bh(&conn->data_q.lock);
+		spin_lock_bh(&conn->out_q.lock);

-		__skb_queue_tail(&conn->data_q, skb);
+		__skb_queue_tail(&conn->out_q, skb);
 		do {
 			skb = list; list = list->next;

Lines 1116-1125  int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)

 
 			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

-			__skb_queue_tail(&conn->data_q, skb);
+			__skb_queue_tail(&conn->out_q, skb);
 		} while (list);

-		spin_unlock_bh(&conn->data_q.lock);
+		spin_unlock_bh(&conn->out_q.lock);
 	}

 	hci_sched_tx(hdev);
Lines 1132-1145  int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)

 {
 	struct hci_dev *hdev = conn->hdev;
 	struct hci_sco_hdr hdr;
-
-	BT_DBG("%s len %d", hdev->name, skb->len);
+	ktime_t now = conn->tx_timer.base->get_time();
+#ifdef CONFIG_BT_HCI_CORE_DEBUG
+	ktime_t timer_exp = conn->tx_timer.expires;
+	BT_DBG("conn %p skb %p, timer %5lu.%06lu", conn, skb,
+		(unsigned long) timer_exp.tv64,
+		do_div(timer_exp.tv64, NSEC_PER_SEC) / 1000);
+#endif

 	if (skb->len > hdev->sco_mtu) {
 		kfree_skb(skb);
 		return -EINVAL;
 	}

+	/* Criteria for underrun condition : more than 100 ms late */
+	if(conn->tx_timer.expires.tv64 + NSEC_PER_SEC / 10 <= now.tv64) {
+		/* We are under underrun condition, just we do a clean start */
+		conn->tx_timer.expires = now;
+	}
+
 	hdr.handle = __cpu_to_le16(conn->handle);
 	hdr.dlen = skb->len;

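The check added above treats the pacing timer as having underrun when its programmed expiry is at least NSEC_PER_SEC / 10 = 100,000,000 ns (100 ms) behind the current base time; in that case pacing restarts from the current time rather than trying to catch up on the backlog. A minimal standalone restatement of the condition, with illustrative names, in plain C outside the kernel:

	#include <stdbool.h>
	#include <stdint.h>

	#define NSEC_PER_SEC 1000000000LL

	/* True when the SCO pacing deadline has fallen 100 ms or more behind
	 * the current time, i.e. the underrun case that forces a clean start. */
	bool sco_tx_underrun(int64_t expires_ns, int64_t now_ns)
	{
		return expires_ns + NSEC_PER_SEC / 10 <= now_ns;
	}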
Lines 1148-1154  int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)

 
 	skb->dev = (void *) hdev;
 	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
-	skb_queue_tail(&conn->data_q, skb);
+	skb_queue_tail(&conn->out_q, skb);
 	hci_sched_tx(hdev);
 	return 0;
 }
Lines 1156-1167  EXPORT_SYMBOL(hci_send_sco);

 
 /* ---- HCI TX task (outgoing data) ---- */

-/* HCI Connection scheduler */
-static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
+/* HCI ACL Connection scheduler */
+static inline struct hci_conn *hci_low_sent_acl(struct hci_dev *hdev, int *quote)
 {
 	struct hci_conn_hash *h = &hdev->conn_hash;
 	struct hci_conn *conn = NULL;
-	int num = 0, min = ~0;
+	unsigned int num = 0, min = ~0;
 	struct list_head *p;

 	/* We don't have to lock device here. Connections are always
Lines 1170-1189  static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)

 		struct hci_conn *c;
 		c = list_entry(p, struct hci_conn, list);

-		if (c->type != type || c->state != BT_CONNECTED
-				|| skb_queue_empty(&c->data_q))
+		BT_DBG("c->type %d c->state %d len(c->out_q) %d min %d c->sent %d",
+			c->type, c->state, skb_queue_len(&c->out_q), min, atomic_read(&c->sent));
+
+		if (c->type != ACL_LINK || c->state != BT_CONNECTED
+				|| skb_queue_empty(&c->out_q))
 			continue;
 		num++;

-		if (c->sent < min) {
-			min = c->sent;
+		if (atomic_read(&c->sent) < min) {
+			min = atomic_read(&c->sent);
 			conn = c;
 		}
 	}

 	if (conn) {
-		int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
-		int q = cnt / num;
+		int q = hdev->acl_cnt / num;
 		*quote = q ? q : 1;
 	} else
 		*quote = 0;
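A subtle point behind the unsigned int change in the previous hunk: atomic_read() returns a plain int, so with a signed min initialised to ~0 (that is, -1) the selection test atomic_read(&c->sent) < min above could never be true and no connection would ever be picked; with min unsigned, ~0 is the largest possible value, so the first eligible connection always replaces it. A tiny standalone illustration (plain C, not kernel code):

	#include <stdio.h>

	int main(void)
	{
		int s_min = ~0;			/* -1 when signed */
		unsigned int u_min = ~0;	/* UINT_MAX when unsigned */
		int sent = 0;			/* what atomic_read(&c->sent) returns */

		printf("signed:   %d\n", sent < s_min);			/* 0: never selected */
		printf("unsigned: %d\n", (unsigned int)sent < u_min);	/* 1: selected */
		return 0;
	}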
Lines 1203-1209  static inline void hci_acl_tx_to(struct hci_dev *hdev)

 	/* Kill stalled connections */
 	list_for_each(p, &h->list) {
 		c = list_entry(p, struct hci_conn, list);
-		if (c->type == ACL_LINK && c->sent) {
+		if (c->type == ACL_LINK && atomic_read(&c->sent)) {
 			BT_ERR("%s killing stalled ACL connection %s",
 				hdev->name, batostr(&c->dst));
 			hci_acl_disconn(c, 0x13);
Lines 1226-1233  static inline void hci_sched_acl(struct hci_dev *hdev)

 		hci_acl_tx_to(hdev);
 	}

-	while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
-		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
+	while (hdev->acl_cnt && (conn = hci_low_sent_acl(hdev, &quote))) {
+		while (quote-- && (skb = skb_dequeue(&conn->out_q))) {
 			BT_DBG("skb %p len %d", skb, skb->len);

 			hci_conn_enter_active_mode(conn);
Lines 1236-1263  static inline void hci_sched_acl(struct hci_dev *hdev)

 			hdev->acl_last_tx = jiffies;

 			hdev->acl_cnt--;
-			conn->sent++;
+			atomic_inc(&conn->sent);
 		}
 	}
 }

-/* Schedule SCO */
-static inline void hci_sched_sco(struct hci_dev *hdev)
+/* HCI SCO tx timer */
+
+static int hci_sco_tx_timer(struct hrtimer *timer)
 {
-	struct hci_conn *conn;
-	struct sk_buff *skb;
-	int quote;
+	struct hci_conn *conn = container_of(timer, struct hci_conn, tx_timer);
+#ifdef CONFIG_BT_HCI_CORE_DEBUG
+	ktime_t now = timer->base->get_time();

-	BT_DBG("%s", hdev->name);
+	BT_DBG("%s, conn %p, time %5lu.%06lu", conn->hdev->name, conn,
+		(unsigned long) now.tv64,
+		do_div(now.tv64, NSEC_PER_SEC) / 1000);
+#endif

-	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
-		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
-			BT_DBG("skb %p len %d", skb, skb->len);
-			hci_send_frame(skb);
+	if(atomic_read(&conn->sent) > 0) {
+		atomic_dec(&conn->sent);
+		atomic_inc(&conn->hdev->sco_cnt);
+		hci_sched_tx(conn->hdev);
+	}
+	return HRTIMER_NORESTART;
+}
+
+/* HCI SCO Connection scheduler */

-			conn->sent++;
-			if (conn->sent == ~0)
-				conn->sent = 0;
+static inline void hci_sched_sco(struct hci_dev *hdev)
+{
+	struct hci_conn_hash *h = &hdev->conn_hash;
+	struct sk_buff *skb;
+	struct list_head *p;
+	struct hci_conn *c;
+
+	BT_DBG("%s", hdev->name);
+
+	/* We don't have to lock device here. Connections are always
+	 * added and removed with TX task disabled. */
+	list_for_each(p, &h->list) {
+		c = list_entry(p, struct hci_conn, list);
+
+		/* SCO scheduling algorithm makes sure there is never more than
+		   1 outstanding packet for each connection */
+		if (c->type == SCO_LINK && atomic_read(&c->sent) < 1 && c->state == BT_CONNECTED)
+		{
+			if(atomic_read(&hdev->sco_cnt) > 0) {
+				if((skb = skb_dequeue(&c->out_q)) != NULL) {
+					ktime_t now, pkt_time;
+
+					hci_send_frame(skb);
+
+					atomic_inc(&c->sent);
+					atomic_dec(&hdev->sco_cnt);
+
+					c->tx_timer.function = hci_sco_tx_timer;
+
+					pkt_time =
+						ktime_set(0, NSEC_PER_SEC / 16000 * (skb->len - HCI_SCO_HDR_SIZE));
+					now = c->tx_timer.base->get_time();
+
+					c->tx_timer.expires.tv64 += pkt_time.tv64;
+					if(c->tx_timer.expires.tv64 > now.tv64) {
+						hrtimer_restart(&c->tx_timer);
+					}
+					else {
+						/* Timer is to expire in the past - this can happen if timer base
+						   precision is less than pkt_time. In this case we force timer
+						   expiration by calling its expires function */
+						c->tx_timer.function(&c->tx_timer);
+					}
+				}
+			}
 		}
 	}
 }
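The pacing arithmetic in hci_sched_sco() above credits the timer with NSEC_PER_SEC / 16000 = 62,500 ns per SCO payload byte (skb->len minus the 3-byte HCI SCO header), i.e. it models the controller draining 16,000 payload bytes per second, and hci_sco_tx_timer() hands the sco_cnt credit back once that budget has elapsed. The connection's tx_timer itself must be initialised elsewhere in the full patch (presumably where the SCO hci_conn is set up, which is not part of these hunks). A small standalone check of the per-packet budget, illustrative only:

	#include <stdint.h>
	#include <stdio.h>

	#define NSEC_PER_SEC		1000000000LL
	#define HCI_SCO_HDR_SIZE	3	/* __le16 handle + __u8 dlen */

	/* Same formula as the pkt_time computation in hci_sched_sco(). */
	int64_t sco_pkt_time_ns(unsigned int skb_len)
	{
		return NSEC_PER_SEC / 16000 * (skb_len - HCI_SCO_HDR_SIZE);
	}

	int main(void)
	{
		/* A 63-byte SCO frame (3-byte header + 60 payload bytes) schedules
		 * the completion timer 3,750,000 ns = 3.75 ms after this send. */
		printf("%lld ns\n", (long long)sco_pkt_time_ns(63));
		return 0;
	}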
Lines 1269-1282  static void hci_tx_task(unsigned long arg)

 
 	read_lock(&hci_task_lock);

-	BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);
+	BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, atomic_read(&hdev->sco_cnt));

 	/* Schedule queues and send stuff to HCI driver */

-	hci_sched_acl(hdev);
-
 	hci_sched_sco(hdev);

+	hci_sched_acl(hdev);
+
 	/* Send next queued raw (unknown type) packet */
 	while ((skb = skb_dequeue(&hdev->raw_q)))
 		hci_send_frame(skb);