AFHBA404
AFHBA404 connects ACQ2106 to PCI-Express
afhba_stream_drv.c
1 /* ------------------------------------------------------------------------- */
2 /* afhba_stream_drv.c D-TACQ ACQ400 FMC DRIVER
3  * afhba_stream_drv.c
4  *
5  * Created on: 19 Jan 2015
6  * Author: pgm
7  */
8 
9 /* ------------------------------------------------------------------------- */
10 /* Copyright (C) 2015 Peter Milne, D-TACQ Solutions Ltd *
11  * <peter dot milne at D hyphen TACQ dot com> *
12  * *
13  * This program is free software; you can redistribute it and/or modify *
14  * it under the terms of Version 2 of the GNU General Public License *
15  * as published by the Free Software Foundation; *
16  * *
17  * This program is distributed in the hope that it will be useful, *
18  * but WITHOUT ANY WARRANTY; without even the implied warranty of *
19  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
20  * GNU General Public License for more details. *
21  * *
22  * You should have received a copy of the GNU General Public License *
23  * along with this program; if not, write to the Free Software *
24  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
25 /* ------------------------------------------------------------------------- */
26 
27 /*
28  * prefix afs : acq fiber stream
29  */
30 
31 
32 #ifndef EXPORT_SYMTAB
33 #define EXPORT_SYMTAB
34 #include <linux/module.h>
35 #endif
36 
37 #include "acq-fiber-hba.h"
38 #include "afhba_stream_drv.h"
39 
40 #include <linux/poll.h>
41 #include <linux/version.h>
42 
43 
44 #define REVID "R1072"
45 
46 #define DEF_BUFFER_LEN 0x100000
47 
48 int RX_TO = 1*HZ;
49 module_param(RX_TO, int, 0644);
50 MODULE_PARM_DESC(RX_TO, "RX timeout (jiffies) [0.1Hz]");
51 
52 int WORK_TO = HZ/100;
53 module_param(WORK_TO, int, 0644);
54 MODULE_PARM_DESC(WORK_TO,
55  "WORK timeout (jiffies) [50Hz] - decrease for hi fifo stat poll rate");
56 
57 int amon_jiffies = HZ/3;
58 module_param(amon_jiffies, int, 0644);
59 MODULE_PARM_DESC(amon_jiffies, "aurora monitor poll rate");
60 
61 int SMOO = 7;
62 module_param(SMOO, int, 0644);
63 MODULE_PARM_DESC(SMOO, "rate smoothing factor 0..9 none..smooth");
64 
65 int stalls = 0;
66 module_param(stalls, int, 0644);
67 MODULE_PARM_DESC(stalls, "number of times ISR ran with no buffers to queue");
68 
69 int buffer_debug = 0;
70 module_param(buffer_debug, int, 0644);
71 
72 
73 int nbuffers = NBUFFERS;
74 module_param(nbuffers, int, 0444);
75 MODULE_PARM_DESC(nbuffers, "number of host-side buffers");
76 
77 int buffer_len = DEF_BUFFER_LEN;
78 module_param(buffer_len, int, 0644);
79 MODULE_PARM_DESC(buffer_len, "length of each buffer in bytes");
80 
81 
84 
85 int transfer_buffers = 0x7fffffff;
86 module_param(transfer_buffers, int, 0644);
87 MODULE_PARM_DESC(transfer_buffers, "number of buffers to transfer");
88 
89 int aurora_to_ms = 1000;
90 module_param(aurora_to_ms, int, 0644);
91 MODULE_PARM_DESC(aurora_to_ms, "timeout on aurora connect");
92 
95 MODULE_PARM_DESC(aurora_monitor, "enable to check cable state in run loop, disable for debug");
96 
97 int eot_interrupt = 0;
98 module_param(eot_interrupt, int, 0644);
99 MODULE_PARM_DESC(eot_interrupt, "1: interrupt every, 0: interrupt none, N: interrupt interval");
100 
103 MODULE_PARM_DESC(cos_interrupt_ok, "1: interrupt every, 0: interrupt none, N: interrupt interval");
104 
108 
111 MODULE_PARM_DESC(dma_descriptor_ram, "descriptors in RAM not FIFO >1 :: validate on load");
112 
115 MODULE_PARM_DESC(assume_stuck_buffers_are_ok, "assume that stuck buffers are ok to release into the wild");
116 
119 MODULE_PARM_DESC(max_dma_load_retry, "number of times to retry loading descriptors. HACK: once should be enough");
120 
121 static struct file_operations afs_fops_dma;
122 static struct file_operations afs_fops_dma_poll;
123 
126 MODULE_PARM_DESC(max_empty_backlog_check, "set to one to look only at top of deck, set to two to check skips");
127 
128 
129 
132 MODULE_PARM_DESC(use_llc_multi, "use LLC for multi descriptor transfer");
133 
134 static int getOrder(int len)
135 {
136  int order;
137  len /= PAGE_SIZE;
138 
139  for (order = 0; 1 << order < len; ++order){
140  ;
141  }
142  return order;
143 }
144 
145 static int getAFDMAC_Order(int len)
146 {
147  int order;
148  len /= AFDMAC_PAGE;
149 
150  for (order = 0; 1 << order < len; ++order){
151  ;
152  }
153  return order;
154 }
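/*
 * Worked example for the two order helpers above (assuming PAGE_SIZE = 4 KiB
 * and AFDMAC_PAGE = 1 KiB; both values are assumptions, not defined here):
 *
 *   getOrder(0x100000)        -> 0x100000/4096 = 256  -> order 8  (1<<8  == 256)
 *   getAFDMAC_Order(0x100000) -> 0x100000/1024 = 1024 -> order 10 (1<<10 == 1024)
 *
 * The page count is truncated by the integer division, then rounded up to
 * the next power of two by the loop.
 */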
155 
156 static void init_descriptors_ht(struct AFHBA_STREAM_DEV *sdev)
157 {
158  int ii;
159 
160  for (ii = 0; ii < sdev->nbuffers; ++ii){
161  u32 descr = sdev->hbx[ii].descr;
162  int len = sdev->hbx[ii].req_len;
163 
164  if (len == 0) len = sdev->buffer_len;
165  descr &= ~AFDMAC_DESC_LEN_MASK;
166  descr |= getAFDMAC_Order(len)<< AFDMAC_DESC_LEN_SHL;
167  switch(eot_interrupt){
168  case 0:
169  descr &= ~AFDMAC_DESC_EOT;
170  break;
171  case 1:
172  descr |= AFDMAC_DESC_EOT;
173  break;
174  default:
175  if (ii%eot_interrupt == 0){
176  descr |= AFDMAC_DESC_EOT;
177  }else{
178  descr &= ~AFDMAC_DESC_EOT;
179  }
180  break;
181  }
182 
183  sdev->hbx[ii].descr = descr;
184  }
185 
186  sdev->push_ram_cursor = sdev->pull_ram_cursor = 0;
187 }
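/*
 * EOT (end-of-transfer interrupt) policy applied above, driven by the
 * eot_interrupt module parameter:
 *
 *   eot_interrupt = 0 : EOT cleared on every descriptor (no per-buffer IRQ)
 *   eot_interrupt = 1 : EOT set on every descriptor
 *   eot_interrupt = N : EOT set on descriptors 0, N, 2N, ... (ii % N == 0)
 *
 * e.g. with 16 buffers and eot_interrupt=4, buffers 0, 4, 8 and 12 interrupt.
 */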
188 
189 
190 
191 
192 
193 #define COPY_FROM_USER(to, from, len) \
194  if (copy_from_user(to, from, len)) { return -EFAULT; }
195 
196 #define COPY_TO_USER(to, from, len) \
197  if (copy_to_user(to, from, len)) { return -EFAULT; }
198 
199 static void write_descr(struct AFHBA_DEV *adev, unsigned offset, int idesc)
200 {
201  struct AFHBA_STREAM_DEV *sdev = adev->stream_dev;
202  u32 descr = sdev->hbx[idesc].descr;
203 
204  if (sdev->job.buffers_queued < 5){
205  dev_info(pdev(adev), "write_descr(%d) [%d] offset:%04x = %08x",
206  sdev->job.buffers_queued, idesc, offset, descr);
207  }
208  DEV_DBG(pdev(adev), "ibuf %d offset:%04x = %08x", idesc, offset, descr);
209  writel(descr, adev->remote+offset);
210  /* force write posting through to avoid message backup */
211  DMA_DESC_FIFSTA_RD(adev);
212 }
213 
214 static int _write_ram_descr(struct AFHBA_DEV *adev, unsigned offset, int idesc, int *cursor)
215 /* returns "address" of entry or ERR */
216 {
217  struct AFHBA_STREAM_DEV *sdev = adev->stream_dev;
218  u32 descr = sdev->hbx[idesc].descr;
219  u32 descrr;
220  int cr = *cursor;
221 
222  if (cr < sdev->nbuffers){
223  int addr = cr;
224  void* va = adev->remote + offset + cr*sizeof(unsigned);
225 
226  DEV_DBG(pdev(adev), "_write_ram_descr() ibuf %d offset:%04x = %08x cursor:%d",
227  idesc, offset, descr, cr);
228  writel(descr, va);
229  descrr = readl(va);
230  if (descr != descrr){
231  dev_err(pdev(adev), "descriptor [%4d] wrote 0x%08x read 0x%08x",
232  cr, descr, descrr);
233  }
234  *cursor = cr + 1;
235  return addr;
236  }else{
237  return -1;
238  }
239 }
240 
241 static int validate_dma_descriptor_ram(
242  struct AFHBA_DEV *adev, unsigned offset, unsigned max_id, int phase)
243 {
244  struct AFHBA_STREAM_DEV *sdev = adev->stream_dev;
245  u32 descr;
246  u32 id;
247  int errors = 0;
248 
249  for (id = 0; id < max_id; ++id){
250  descr = readl(adev->remote+offset+id*sizeof(unsigned));
251  if (descr != sdev->hbx[id].descr){
252  dev_err(pdev(adev), "%s phase:%d descriptor mismatch at [%d] w:%08x r:%08x",
253  "validate_dma_descriptor_ram", phase, id, sdev->hbx[id].descr, descr);
254  errors += 1;
255  }
256  dev_dbg(pdev(adev), "%s phase:%d descriptor at [%d] w:%08x r:%08x",
257  "validate_dma_descriptor_ram", phase, id, sdev->hbx[id].descr, descr);
258  }
259  if (errors){
260  dev_err(pdev(adev), "%s descriptor errors %d out of %d",
261  "validate_dma_descriptor_ram", errors, id);
262  return errors;
263  }else{
264  dev_info(pdev(adev), "%s %d descriptors PASS",
265  "validate_dma_descriptor_ram", id);
266  return 0;
267  }
268 }
269 static void write_ram_descr(struct AFHBA_DEV *adev, unsigned offset, int idesc)
270 {
271  struct AFHBA_STREAM_DEV *sdev = adev->stream_dev;
272  int addr;
273 
274  if (offset == DMA_PUSH_DESC_RAM){
275  addr = _write_ram_descr(adev, offset, idesc, &sdev->push_ram_cursor);
276  if (addr != -1){
278  }
279  }else{
280  dev_warn(pdev(adev), "write_ram_descr valid PUSH only");
281  addr = _write_ram_descr(adev, offset, idesc, &sdev->pull_ram_cursor);
282  if (addr != -1){
284  }
285  }
286 }
287 u32 _afs_read_zynqreg(struct AFHBA_DEV *adev, int regoff)
288 {
289  u32* dma_regs = (u32*)(adev->remote + ZYNQ_BASE);
290  void* va = &dma_regs[regoff];
291  u32 value = readl(va);
292  DEV_DBG(pdev(adev), "%04lx = %08x", va-adev->remote, value);
293  return adev->stream_dev->dma_regs[regoff] = value;
294 }
295 
296 void _afs_write_comreg(struct AFHBA_DEV *adev, int regoff, u32 value)
297 {
298  u32* dma_regs = (u32*)(adev->mappings[adev->remote_com_bar].va + COMMON_BASE);
299  void* va = &dma_regs[regoff];
300  DEV_DBG(pdev(adev), "%04lx = %08x", va-adev->remote, value);
301  writel(value, va);
302 }
303 
304 void _afs_write_dmareg(struct AFHBA_DEV *adev, int regoff, u32 value)
305 
306 {
307  u32* dma_regs = (u32*)(adev->remote + DMA_BASE);
308  void* va = &dma_regs[regoff];
309  DEV_DBG(pdev(adev), "%04lx = %08x", va-adev->remote, value);
310  writel(value, va);
311 }
312 
313 u32 _afs_read_dmareg(struct AFHBA_DEV *adev, int regoff)
314 {
315  u32* dma_regs = (u32*)(adev->remote + DMA_BASE);
316  void* va = &dma_regs[regoff];
317  u32 value = readl(va);
318  DEV_DBG(pdev(adev), "%04lx = %08x", va-adev->remote, value);
319  return adev->stream_dev->dma_regs[regoff] = value;
320 }
321 
322 void _afs_write_pcireg(struct AFHBA_DEV *adev, int regoff, u32 value)
323 
324 {
325  u32* dma_regs = (u32*)(adev->remote + PCIE_BASE);
326  void* va = &dma_regs[regoff];
327  DEV_DBG(pdev(adev), "%04lx = %08x", va-adev->remote, value);
328  DEV_DBG(pdev(adev), "%p = %08x", va, value);
329  writel(value, va);
330 }
331 
332 u32 _afs_read_pcireg(struct AFHBA_DEV *adev, int regoff)
333 {
334  u32* dma_regs = (u32*)(adev->remote + PCIE_BASE);
335  void* va = &dma_regs[regoff];
336  u32 value = readl(va);
337  DEV_DBG(pdev(adev), "%04lx = %08x", va-adev->remote, value);
338  return adev->stream_dev->dma_regs[regoff] = value;
339 }
340 static void afs_load_push_descriptor(struct AFHBA_DEV *adev, int idesc)
341 {
342  struct AFHBA_STREAM_DEV *sdev = adev->stream_dev;
343  if (sdev->push_descr_ram){
344  if (!adev->stream_dev->job.dma_started){
345  write_ram_descr(adev, DMA_PUSH_DESC_RAM, idesc);
346  }
347  /* else .. NO ACTION, descriptor already loaded in RAM */
348  }else{
349  write_descr(adev, DMA_PUSH_DESC_FIFO, idesc);
350  }
351 }
352 
353 void afs_load_pull_descriptor(struct AFHBA_DEV *adev, int idesc)
354 {
355  struct AFHBA_STREAM_DEV *sdev = adev->stream_dev;
356  if (sdev->pull_descr_ram){
357  if (!adev->stream_dev->job.dma_started){
358  write_ram_descr(adev, DMA_PULL_DESC_RAM, idesc);
359  }
360  /* else .. NO ACTION, descriptor already loaded in RAM */
361  }else{
362  write_descr(adev, DMA_PULL_DESC_FIFO, idesc);
363  }
364 }
365 
366 static void afs_init_dma_clr(struct AFHBA_DEV *adev)
367 {
368  DMA_CTRL_RD(adev);
370 }
371 
372 static void afs_configure_streaming_dma(
373  struct AFHBA_DEV *adev, enum DMA_SEL dma_sel)
374 {
375  struct AFHBA_STREAM_DEV *sdev = adev->stream_dev;
376 
377  u32 dma_ctrl = DMA_CTRL_RD(adev);
378  u32 check;
379  dma_ctrl &= ~dma_pp(dma_sel, DMA_CTRL_LOW_LAT|DMA_CTRL_RECYCLE);
380  if (((dma_sel&DMA_PUSH_SEL) && sdev->push_descr_ram) ||
381  ((dma_sel&DMA_PULL_SEL) && sdev->pull_descr_ram) ){
382  dma_ctrl |= DMA_CTRL_RAM;
383  }else{
384  dma_ctrl &= ~DMA_CTRL_RAM;
385  }
386  DMA_CTRL_WR(adev, dma_ctrl);
387  check = DMA_CTRL_RD(adev);
388  if (check != dma_ctrl){
389  dev_err(pdev(adev), "%s setting DMA_CTRL w:%08x r:%08x",
390  "afs_configure_streaming_dma", dma_ctrl, check);
391  }
392 }
393 
394 static void afs_dma_set_recycle(
395  struct AFHBA_DEV *adev, enum DMA_SEL dma_sel, int enable)
396 {
397  u32 dma_ctrl = DMA_CTRL_RD(adev);
398 
399  dma_ctrl &= ~dma_pp(dma_sel, DMA_CTRL_RECYCLE);
400  DMA_CTRL_WR(adev, dma_ctrl);
401 }
402 
403 static void afs_load_llc_single_dma(
404  struct AFHBA_DEV *adev, enum DMA_SEL dma_sel, u32 pa, unsigned len)
405 {
406  struct AFHBA_STREAM_DEV *sdev = adev->stream_dev;
407  u32 dma_ctrl = DMA_CTRL_RD(adev);
408  u32 len64 = ((len/64-1) + (len%64!=0));
409  u32 offset = DMA_DIR_DESC_FIFO(dma_sel);
410  u32 dma_desc;
411 
412  dev_dbg(pdev(adev), "afs_load_llc_single_dma %s 0x%08x %d",
413  sDMA_SEL(dma_sel), pa, len);
414 
415  len64 <<= AFDMAC_DESC_LEN_SHL;
416  len64 &= AFDMAC_DESC_LEN_MASK;
417 
418  dma_desc = pa&AFDMAC_DESC_ADDR_MASK;
419  dma_desc |= len64;
420  dma_desc |= sdev->shot&AFDMAC_DESC_ID_MASK;
421 
422  dma_ctrl &= ~dma_pp(dma_sel, DMA_CTRL_RAM);
423  dma_ctrl |= dma_pp(dma_sel, DMA_CTRL_LOW_LAT|DMA_CTRL_RECYCLE);
424 
425  dev_dbg(pdev(adev),
426  "afs_load_llc_single_dma len64:%08x dma_desc:%08x dma_ctrl:%08x",
427  len64, dma_desc, dma_ctrl);
428 
429  _afs_write_dmareg(adev, DMA_DIR_DESC_LEN(dma_sel), 0);
430  DMA_CTRL_WR(adev, dma_ctrl);
431  writel(dma_desc, adev->remote+offset);
432  afs_start_dma(adev, dma_sel);
433 }
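/*
 * Note on the length encoding above: len64 = (len/64 - 1) + (len%64 != 0)
 * packs the transfer length as a count of 64-byte blocks, minus one, rounded
 * up for partial blocks. For example:
 *
 *   len = 384 -> 384/64 = 6, no remainder -> len64 = 5 (6 blocks, 384 bytes)
 *   len = 100 -> 100/64 = 1, remainder 36 -> len64 = 1 (2 blocks, 128 bytes)
 *
 * The value is then shifted under AFDMAC_DESC_LEN_MASK alongside the buffer
 * physical address and the shot id.
 */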
434 
435 static void afs_load_dram_descriptors(
436  struct AFHBA_DEV *adev, enum DMA_SEL dma_sel,
437  struct XLLC_DEF* buffers,
438  int nbufs
439 )
441 {
442  u32 dma_ctrl = DMA_CTRL_RD(adev);
443  u32 offset = DMA_DIR_DESC_RAM(dma_sel);
444  int idesc;
445 
446  dma_ctrl &= ~dma_pp(dma_sel, DMA_CTRL_EN|DMA_CTRL_LOW_LAT|DMA_CTRL_RECYCLE);
447  dma_ctrl |= dma_pp(dma_sel, DMA_CTRL_RAM);
448  DMA_CTRL_WR(adev, dma_ctrl);
449 
450  for (idesc = 0; idesc < nbufs; ++idesc, offset += 4){
451  struct XLLC_DEF* bd = &buffers[idesc];
452  u32 dma_desc = bd->pa
453  | getAFDMAC_Order(bd->len)<< AFDMAC_DESC_LEN_SHL
454  | idesc;
455  dev_dbg(pdev(adev),"%s() [%d] 0x%08x",
456  "afs_load_dram_descriptors", idesc, dma_desc);
457  writel(dma_desc, adev->remote+offset);
458  }
459 
460  dma_ctrl |= dma_pp(dma_sel, DMA_CTRL_EN);
461  _afs_write_dmareg(adev, DMA_DIR_DESC_LEN(dma_sel), nbufs-1);
462  DMA_CTRL_WR(adev, dma_ctrl);
463  afs_start_dma(adev, dma_sel);
464 }
465 
466 
467 #define LL_MAX_CNT 16
468 #define LL_BLOCK 64
469 #define LL_NB(cnt) ((cnt)-1)
470 #define LL_MAX_LEN (LL_MAX_CNT*LL_BLOCK)
471 
472 static void afs_load_dram_descriptors_ll(
473  struct AFHBA_DEV *adev, enum DMA_SEL dma_sel,
474  struct XLLC_DEF* buffers,
475  int nbufs
476 )
478 {
479  u32 dma_ctrl = DMA_CTRL_RD(adev);
480  int cursor = 0;
481  int ib = 0;
482 
483  for ( ; ib < nbufs; ++ib){
484  struct XLLC_DEF* bd = &buffers[ib];
485  int idb = 0;
486  for ( ; idb*LL_MAX_LEN < bd->len; ++idb, ++cursor){
487  u32 dma_desc;
488  int cnt;
489  int residue = bd->len - idb*LL_MAX_LEN;
490  unsigned reg_off = DMA_DIR_DESC_RAM(dma_sel) + cursor*4;
491 
492  residue = min(residue, LL_MAX_LEN);
493  if (residue != max(residue, LL_BLOCK)){
494  dev_warn(pdev(adev), "%s() [%d] [%04x] expect data discontinuity want:%d getting:%d\n",
495  __FUNCTION__, idb, reg_off, residue, LL_BLOCK);
496  residue = LL_BLOCK;
497  }
498 
499  cnt = residue/LL_BLOCK;
500 
501  dma_desc = (bd->pa + idb*LL_MAX_LEN)
502  | LL_NB(cnt)<< AFDMAC_DESC_LEN_SHL
503  | ((reg_off>>2)&0x0f);
504 
505  dev_dbg(pdev(adev), "%s() [%d] [%04x] %p := 0x%08x",
506  __FUNCTION__, idb, reg_off, adev->remote+reg_off, dma_desc);
507 
508  writel(dma_desc, adev->remote+reg_off);
509  msleep(5);
510  }
511  }
512 
513  _afs_write_dmareg(adev, DMA_DIR_DESC_LEN(dma_sel), cursor-1);
514 
515  dma_ctrl &= ~dma_pp(dma_sel, DMA_CTRL_RECYCLE);
516  dma_ctrl |= dma_pp(dma_sel, DMA_CTRL_LOW_LAT|DMA_CTRL_RAM);
517 
518  DMA_CTRL_WR(adev, dma_ctrl);
519  afs_start_dma(adev, dma_sel);
520 }
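/*
 * Sketch of how afs_load_dram_descriptors_ll() splits one buffer: with
 * LL_MAX_LEN = 16*64 = 1024 bytes per descriptor, a 2560-byte buffer becomes
 * three RAM descriptors:
 *
 *   [0] pa+0    cnt=16 (1024 bytes)
 *   [1] pa+1024 cnt=16 (1024 bytes)
 *   [2] pa+2048 cnt=8  ( 512 bytes)
 *
 * Each descriptor carries LL_NB(cnt) = cnt-1 in the length field and
 * (reg_off>>2)&0x0f in the low bits; DMA_DIR_DESC_LEN is finally set to
 * cursor-1.
 */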
521 
522 static int _afs_dma_started(struct AFHBA_DEV *adev, int shl)
523 {
524  u32 ctrl = DMA_CTRL_RD(adev);
525  ctrl >>= shl;
526  return (ctrl&DMA_CTRL_EN) != 0;
527 }
528 
529 
530 static inline int afs_dma_started(struct AFHBA_DEV *adev, enum DMA_SEL dma_sel)
531 {
532  return _afs_dma_started(adev, dma_pp(dma_sel, DMA_CTRL_PUSH_SHL));
533 }
534 
535 
536 
537 static int afs_aurora_lane_up(struct AFHBA_DEV *adev)
538 {
539 
540  u32 stat;
541 
542  stat = afhba_read_reg(adev, ASR(adev->ACR));
543 
545  ++adev->aurora_status_read_count;
546  if (adev->aurora_status_read_count == 1){
547  dev_info(pdev(adev), "afs_aurora_lane_up %c status 0x%08x",
548  aurora_id(adev), stat);
549  }
550 
551  return (stat & AFHBA_AURORA_STAT_LANE_UP) != 0;
552 }
553 
554 /* super-paranoid remote access */
555 void __afs_dma_reset(struct AFHBA_DEV *adev, u32 dma_sel)
556 {
557  if (afs_comms_ready(adev)){
558  DMA_CTRL_CLR(adev, dma_pp(dma_sel, DMA_CTRL_EN));
559  DMA_CTRL_SET(adev, dma_pp(dma_sel, DMA_CTRL_FIFO_RST));
560  DMA_CTRL_CLR(adev, dma_pp(dma_sel, DMA_CTRL_FIFO_RST));
561  }
562 }
563 void __afs_stop_dma(struct AFHBA_DEV *adev, u32 dma_sel)
564 {
565  if (afs_comms_ready(adev)){
566  DMA_CTRL_CLR(adev, dma_pp(dma_sel, DMA_CTRL_EN));
567  }
568 }
569 
570 void __afs_start_dma(struct AFHBA_DEV *adev, u32 dma_sel)
571 {
572  if (afs_comms_ready(adev)){
573  u32 dmacr;
574  DMA_CTRL_SET(adev, dma_pp(dma_sel, DMA_CTRL_EN));
575  dmacr = DMA_CTRL_RD(adev);
576  if ((dmacr&dma_pp(dma_sel, DMA_CTRL_EN)) == 0){
577  dev_err(pdev(adev), "DMA_CTRL_EN NOT SET wanted:%08x got:%08x",
578  dma_pp(dma_sel, DMA_CTRL_EN), dmacr);
579  }
580  }
581 }
582 
583 static int afs_aurora_errors(struct AFHBA_DEV *adev)
584 {
585  u32 stat = afhba_read_reg(adev, ASR(adev->ACR));
586  int link_warning = 0;
587 
588  if ((stat&AFHBA_AURORA_STAT_ERR) != 0){
589  u32 ctrl = afhba_read_reg(adev, adev->ACR);
590  afhba_write_reg(adev, adev->ACR, ctrl|AFHBA_AURORA_CTRL_CLR);
591  if (++adev->aurora_error_count==1){
592  dev_info(pdev(adev),
593  "aurora%c initial s:0x%08x m:0x%08x e:0x%08x",
594  adev->sfp-SFP_A + 'A',
596  }else{
597  dev_warn(pdev(adev),
598  "aurora%c error: [%d] s:0x%08x m:0x%08x e:0x%08x",
599  adev->sfp-SFP_A + 'A',
600  adev->aurora_error_count,
602  link_warning = 1;
603  }
604  stat = afhba_read_reg(adev, ASR(adev->ACR));
605  if ((stat&AFHBA_AURORA_STAT_ERR) != 0){
606  dev_err(pdev(adev),
607  "aurora%c error: [%d] s:0x%08x m:0x%08x e:0x%08x NOT CLEARED",
608  adev->sfp-SFP_A + 'A',
609  adev->aurora_error_count,
611  msleep(1000);
612  return -1;
613  }else{
614  return link_warning? -1: 1;
615  }
616  }else{
617  return 0;
618  }
619 }
620 
621 static void _afs_pcie_mirror_init(struct AFHBA_DEV *adev)
622 {
623  int ireg;
624 
625  for (ireg = PCIE_CNTRL; ireg <= PCIE_BUFFER_CTRL; ++ireg){
626  u32 local = afhba_read_reg(adev, ireg*sizeof(u32));
627  switch(ireg){
628  case PCIE_CONF:
629  local |= adev->sfp << PCIE_CONF_AF_PORT_SHL;
630  }
631  PCI_REG_WRITE(adev, ireg, local);
632  }
633 }
634 
635 #define MSLEEP_TO 10
636 
637 static int is_valid_z_ident(unsigned z_ident, char buf[], int maxbuf)
638 {
639  if ((z_ident&0x21060000) == 0x21060000){
640  snprintf(buf, maxbuf, "acq2106_%03d.comms%X",
641  z_ident&0x0ffff, (z_ident&0x00f00000)>>20);
642  return 1;
643  }else if ((z_ident&0xe4330000) == 0xe4330000){
644  snprintf(buf, maxbuf, "kmcu_%03d.comms%x",
645  z_ident&0x0ffff, (z_ident&0x00f00000)>>20);
646  return 1;
647  }else if ((z_ident&0x43000000) == 0x43000000){
648  snprintf(buf, maxbuf, "kmcu_%03d.comms%x",
649  z_ident&0x0ffff, 0);
650  return 1;
651  }else{
652  return 0;
653  }
654 }
655 
656 static int _afs_check_read(struct AFHBA_DEV *adev)
657 {
658  struct AFHBA_STREAM_DEV* sdev = adev->stream_dev;
659  unsigned z_ident1 = _afs_read_zynqreg(adev, Z_IDENT);
660  unsigned z_ident2 = _afs_read_zynqreg(adev, Z_IDENT);
661 
662  if (z_ident2 == 0xffffffff || (z_ident2&~0x0ffff) == 0xdead0000){
663  dev_err(pdev(adev), "ERROR reading Z_IDENT %08x, please reboot now", z_ident2);
664  return -1;
665  }else{
666  char buf[80];
667  int valid_id = is_valid_z_ident(z_ident2, buf, 80);
668 #define ZI_FMT "[%d] Z_IDENT 1:0x%08x 2:0x%08x %s"
669 
670  if (valid_id){
671  if (sdev->zi_report != ZI_GOOD){
672  dev_info(pdev(adev), ZI_FMT,
673  adev->idx, z_ident1, z_ident2,
674  buf);
675  sdev->zi_report = ZI_GOOD;
676  }
677  return 0;
678  }else{
679  if (sdev->zi_report != ZI_BAD){
680  dev_info(pdev(adev), ZI_FMT,
681  adev->idx, z_ident1, z_ident2,
682  "ID NOT VALID");
683  sdev->zi_report = ZI_BAD;
684  }
685  return 1;
686  }
687  }
688 }
689 
690 static int _afs_comms_init(struct AFHBA_DEV *adev)
691 {
692  struct AFHBA_STREAM_DEV* sdev = adev->stream_dev;
693  enum { WAIT_INIT, WAIT_LANE_UP, WAIT_LANE_STILL_UP, CHECK_READ, TXEN } state = WAIT_INIT;
694  int ticks_in_state = 0;
695 #define CHANGE_STATE(s) state = (s); ticks_in_state = 0; continue
696 
697  for ( ; ; msleep(MSLEEP_TO), ++ticks_in_state){
698  dev_info(pdev(adev), "%s state:%d %s ticks:%d", __FUNCTION__, state,
699  state==WAIT_INIT? "WAIT_INIT": state==WAIT_LANE_UP? "WAIT_LANE_UP":
700  state==WAIT_LANE_STILL_UP?"WAIT_LANE_STILL_UP": state==CHECK_READ? "CHECK_READ":
701  state==TXEN? "TXEN": "???",
702  ticks_in_state);
703 
704  switch(state){
705  case WAIT_INIT:
707  CHANGE_STATE(WAIT_LANE_UP);
708  break;
709  case WAIT_LANE_UP:
710  if (afs_aurora_lane_up(adev)){
711  CHANGE_STATE(WAIT_LANE_STILL_UP);
712  }else{
713  if (ticks_in_state > 100){
714  return 0;
715  }
716  }
717  break;
718  case WAIT_LANE_STILL_UP:
719  if (afs_aurora_lane_up(adev)){
720  if (ticks_in_state > 3){
721  dev_info(pdev(adev), "%s call mirror_init 01", __FUNCTION__);
722  afs_init_dma_clr(adev);
723  _afs_pcie_mirror_init(adev);
724  CHANGE_STATE(CHECK_READ);
725  }
726  }else{
727  CHANGE_STATE(WAIT_LANE_UP);
728  }
729  break;
730  case CHECK_READ:
731  sdev->comms_init_done = _afs_check_read(adev) == 0;
732  if (afs_aurora_errors(adev) == -1 ){
733  dev_warn(pdev(adev), "%s bad aurora, TXDIS", __FUNCTION__);
735  CHANGE_STATE(TXEN);
736  }else{
737  return sdev->comms_init_done;
738  }
739  case TXEN:
740  afhba_write_reg(adev, adev->ACR, 0);
741  CHANGE_STATE(WAIT_INIT);
742  default:
743  dev_err(pdev(adev), "%s illegal state %d", __FUNCTION__, state);
744  return 0;
745  }
746  }
747 }
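/*
 * Timing note on the state machine above: each loop iteration sleeps
 * MSLEEP_TO = 10 ms, and WAIT_LANE_UP gives up after ticks_in_state > 100,
 * i.e. roughly one second of polling, consistent with the aurora_to_ms = 1000
 * default declared earlier.
 */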
748 
749 int afs_comms_init(struct AFHBA_DEV *adev)
750 {
751  struct AFHBA_STREAM_DEV* sdev = adev->stream_dev;
752 
753  if (afs_aurora_lane_up(adev)){
754  if (!adev->link_up){
755  dev_info(pdev(adev), "aurora%c link up!", aurora_id(adev));
756  adev->link_up = true;
757  }
758  if (!sdev->comms_init_done){
759  sdev->comms_init_done = _afs_comms_init(adev);
760  }
761 
762  return sdev->comms_init_done;
763  }else{
764  if (adev->link_up){
765  dev_info(pdev(adev), "aurora%c link down!", aurora_id(adev));
766  adev->link_up = false;
767  }
768  dev_dbg(pdev(adev), "aurora lane down");
769  return sdev->comms_init_done = false;
770  }
771 }
772 
773 int afs_comms_ready(struct AFHBA_DEV *adev)
774 {
775  struct AFHBA_STREAM_DEV* sdev = adev->stream_dev;
776  return adev->link_up && sdev->comms_init_done;
777 }
778 
779 
780 #define RTDMAC_DATA_FIFO_CNT 0x1000
781 #define RTDMAC_DESC_FIFO_CNT 0x1000
782 
783 #define DATA_FIFO_SZ (RTDMAC_DATA_FIFO_CNT*sizeof(unsigned))
784 #define DESC_FIFO_SZ (RTDMAC_DESC_FIFO_CNT*sizeof(unsigned))
785 
786 static void mark_empty(struct device *dev, struct HostBuffer *hb){
787  u32 mark_len = 2 * sizeof(u32);
788  u32 offset = hb->req_len - mark_len;
789  u32 *pmark = (u32*)(hb->va + offset);
790 
791  pmark[0] = EMPTY1;
792  pmark[1] = EMPTY2;
793 
794  dma_sync_single_for_device(dev, hb->pa+offset, mark_len, PCI_DMA_FROMDEVICE);
795 }
796 
797 
798 static int is_marked_empty(struct device *dev, struct HostBuffer *hb){
799  u32 mark_len = 2 * sizeof(u32);
800  u32 offset = hb->req_len - mark_len;
801  u32 *pmark = (u32*)(hb->va + offset);
802  int is_empty;
803 
804  dma_sync_single_for_cpu(dev, hb->pa+offset, mark_len, PCI_DMA_FROMDEVICE);
805 
806  is_empty = pmark[0] == EMPTY1 && pmark[1] == EMPTY2;
807 
808  return is_empty;
809 }
810 
811 static int queue_next_free_buffer(struct AFHBA_DEV *adev)
812 {
813  struct AFHBA_STREAM_DEV *sdev = adev->stream_dev;
814  int rc = 0;
815 
816  if (mutex_lock_interruptible(&sdev->list_mutex)){
817  return -ERESTARTSYS;
818  }
819  if (!list_empty_careful(&sdev->bp_empties.list)){
820  struct HostBuffer *hb = HB_ENTRY(sdev->bp_empties.list.next);
821 
822  mark_empty(&adev->pci_dev->dev, hb);
823 
824  afs_load_push_descriptor(adev, hb->ibuf);
825  hb->bstate = BS_FILLING;
826  list_move_tail(&hb->list, &sdev->bp_filling.list);
827  rc = 1;
828  }
829  mutex_unlock(&sdev->list_mutex);
830  return rc;
831 }
832 
833 static void queue_free_buffers(struct AFHBA_DEV *adev)
834 {
835  struct AFHBA_STREAM_DEV *sdev = adev->stream_dev;
836  struct JOB *job = &sdev->job;
837  int in_queue = job->buffers_queued -
838  (job->buffers_received+job->buffers_discarded);
839 
840  while (job->buffers_queued < job->buffers_demand){
841  if (queue_next_free_buffer(adev)){
842  ++job->buffers_queued;
843  ++in_queue;
844  }else{
845  if (in_queue == 0){
846  ++stalls;
847  }
848  break;
849  }
850  }
851 }
852 
853 struct HostBuffer* hb_from_descr(struct AFHBA_DEV *adev, u32 inflight_descr)
854 {
855  struct AFHBA_STREAM_DEV *sdev = adev->stream_dev;
856  int ii;
857 
858  for (ii = 0; ii < nbuffers; ++ii){
859  if (sdev->hbx[ii].descr == inflight_descr){
860  return &sdev->hbx[ii];
861  }
862  }
863  return 0;
864 }
865 
866 static void report_inflight(
867  struct AFHBA_DEV *adev, int ibuf, int is_error, char *msg)
868 {
869  if (adev->stream_dev->job.buffers_demand == 0){
870  return;
871  }
872  if (dma_descriptor_ram){
873  dev_warn(pdev(adev), "%s: buffer %02d %s",
874  msg, ibuf, is_error? "ERROR": "WARNING");
875  }else{
876  u32 inflight_descr = DMA_PUSH_DESC_STA_RD(adev);
877  struct HostBuffer* inflight = hb_from_descr(adev, inflight_descr);
878  u32 fifsta = DMA_DESC_FIFSTA_RD(adev);
879 
880  dev_warn(pdev(adev), "%s: buffer %02d last descr:%08x [%02d] fifo:%08x %s",
881  msg,
882  ibuf,
883  inflight_descr,
884  inflight? inflight->ibuf: -1,
885  fifsta,
886  is_error? "ERROR": "WARNING");
887  }
888 
889 }
890 static void report_stuck_buffer(struct AFHBA_DEV *adev, int ibuf)
891 {
892  report_inflight(adev, ibuf, 0, "buffer was skipped");
893 }
894 
895 static void return_empty(struct AFHBA_DEV *adev, struct HostBuffer *hb)
897 {
898  struct AFHBA_STREAM_DEV *sdev = adev->stream_dev;
899  dev_dbg(pdev(adev), "ibuf %d", hb->ibuf);
900  hb->bstate = BS_EMPTY;
901  list_move_tail(&hb->list, &sdev->bp_empties.list);
902 }
903 
904 static int _queue_full_buffer(struct AFHBA_DEV *adev, struct HostBuffer* hb, int nrx)
905 {
906  struct AFHBA_STREAM_DEV *sdev = adev->stream_dev;
907  struct JOB *job = &sdev->job;
908 
909  hb->esta = read_astatus2(adev);
910  if (buffer_debug){
911  report_inflight(adev, hb->ibuf, 0, "->FULL");
912  }
913 
914  hb->bstate = BS_FULL;
915  list_move_tail(&hb->list, &sdev->bp_full.list);
916  job->buffers_received++;
917  return nrx + 1;
918 }
919 static int queue_full_buffers(struct AFHBA_DEV *adev)
920 {
921  struct AFHBA_STREAM_DEV *sdev = adev->stream_dev;
922  struct HostBuffer* hb;
923  struct HostBuffer* tmp;
924  struct HostBuffer* first = 0;
925  struct JOB *job = &sdev->job;
926  int nrx = 0;
927  int ifilling = 0;
928 
929  if (mutex_lock_interruptible(&sdev->list_mutex)){
930  return -ERESTARTSYS;
931  }
932 
933  list_for_each_entry_safe(hb, tmp, &sdev->bp_filling.list, list){
934  if (++ifilling == 1){
935  first = hb;
936  }
937  if (is_marked_empty(&adev->pci_dev->dev, hb)){
938  if (ifilling >= max_empty_backlog_check){
939  break; /* top 2 buffers empty: no action */
940  }else{
941  continue; /* check skipped data. */
942  }
943  }else{
944  if (ifilling > 1 && first && hb != first){
945  if (is_marked_empty(&adev->pci_dev->dev, first)){
946 
948  report_inflight(adev, first->ibuf, 0, "queuing dirty");
949  nrx = _queue_full_buffer(adev, first, nrx);
950  }else{
951  report_stuck_buffer(adev, first->ibuf);
952  return_empty(adev, first);
954  dev_warn(pdev(adev), "stop_on_skipped_buffer triggered");
955  job->please_stop = PS_PLEASE_STOP;
956  }
957  }
958  first = 0;
959  }else{
960  report_inflight(adev, first->ibuf, 0, "jackpot");
961  nrx = _queue_full_buffer(adev, first, nrx);
962  first = 0;
963  }
964  }
965 
966  nrx = _queue_full_buffer(adev, hb, nrx);
967  }
968  }
969 
970  if (nrx){
971  if (ifilling > NBUFFERS){
972  dev_warn(pdev(adev), "ifilling > NBUFFERS?");
973  ifilling = 0;
974  }
975  job->catchup_histo[nrx]++;
976  }
977 
978  mutex_unlock(&sdev->list_mutex);
979  return nrx;
980 }
981 
982 
983 
984 static void init_histo_buffers(struct AFHBA_STREAM_DEV* sdev)
985 {
986  int ii;
987 
988  sdev->data_fifo_histo = kzalloc(DATA_FIFO_SZ, GFP_KERNEL);
989  sdev->desc_fifo_histo = kzalloc(DESC_FIFO_SZ, GFP_KERNEL);
990 
991  /* give it a test pattern .. */
992 
993  for (ii = 0; ii != RTDMAC_DATA_FIFO_CNT; ++ii){
994  sdev->data_fifo_histo[ii] = 0x70000000 + ii;
995  }
996  for (ii = 0; ii != RTDMAC_DESC_FIFO_CNT; ++ii){
997  sdev->desc_fifo_histo[ii] = 0x50000000 + ii;
998  }
999 }
1000 
1001 int afs_init_buffers(struct AFHBA_DEV* adev)
1002 {
1003  struct AFHBA_STREAM_DEV* sdev = adev->stream_dev;
1004  struct HostBuffer *hb;
1005  int order = getOrder(buffer_len);
1006  int ii;
1007 
1008  dev_dbg(pdev(adev), "afs_init_buffers() 01 order=%d", order);
1009 
1010  sdev->hbx = kzalloc(sizeof(struct HostBuffer)*nbuffers, GFP_KERNEL);
1011  INIT_LIST_HEAD(&sdev->bp_empties.list);
1012  INIT_LIST_HEAD(&sdev->bp_filling.list);
1013  INIT_LIST_HEAD(&sdev->bp_full.list);
1014  spin_lock_init(&sdev->job_lock);
1015 
1016  mutex_init(&sdev->list_mutex);
1017  mutex_lock(&sdev->list_mutex);
1018 
1019  sdev->buffer_len = buffer_len;
1020  dev_dbg(pdev(adev), "allocating %d buffers size:%d order:%d dev.dma_mask:%08llx",
1021  nbuffers, buffer_len, order, *adev->pci_dev->dev.dma_mask);
1022 
1023  for (hb = sdev->hbx, ii = 0; ii < nbuffers; ++ii, ++hb){
1024  void *buf = (void*)__get_free_pages(GFP_KERNEL|GFP_DMA32, order);
1025 
1026  if (!buf){
1027  dev_err(pdev(adev), "failed to allocate buffer %d", ii);
1028  break;
1029  }
1030 
1031  dev_dbg(pdev(adev), "buffer %2d allocated at %p, map it", ii, buf);
1032 
1033  hb->ibuf = ii;
1034  hb->pa = dma_map_single(&adev->pci_dev->dev, buf,
1035  buffer_len, PCI_DMA_FROMDEVICE);
1036  hb->va = buf;
1037  hb->len = buffer_len;
1038 
1039  dev_dbg(pdev(adev), "buffer %2d allocated, map done", ii);
1040 
1041  if ((hb->pa & (AFDMAC_PAGE-1)) != 0){
1042  dev_err(pdev(adev), "HB NOT PAGE ALIGNED");
1043  WARN_ON(true);
1044  return -1;
1045  }
1046 
1047  hb->descr = hb->pa | 0 | AFDMAC_DESC_EOT | (ii&AFDMAC_DESC_ID_MASK);
1048  hb->bstate = BS_EMPTY;
1049 
1050  dev_dbg(pdev(adev), "[%d] %p %08x %d %08x",
1051  ii, hb->va, hb->pa, hb->len, hb->descr);
1052  list_add_tail(&hb->list, &sdev->bp_empties.list);
1053  }
1054  sdev->nbuffers = nbuffers;
1055  sdev->init_descriptors = init_descriptors_ht;
1056  sdev->init_descriptors(sdev);
1057  init_waitqueue_head(&sdev->work.w_waitq);
1058  init_waitqueue_head(&sdev->return_waitq);
1059 
1060  mutex_unlock(&sdev->list_mutex);
1061 
1062  if (dma_descriptor_ram){
1063  sdev->push_descr_ram = sdev->pull_descr_ram = 1;
1064  }
1065 
1066  init_histo_buffers(sdev);
1067  dev_dbg(pdev(adev), "afs_init_buffers() 99");
1068  return 0;
1069 }
1070 
1071 static irqreturn_t afs_cos_isr(int irq, void *data)
1072 {
1073  struct AFHBA_DEV* adev = (struct AFHBA_DEV*)data;
1074 
1075  unsigned cr = afhba_read_reg(adev, HOST_PCIE_INTERRUPT_REG);
1077  dev_info(pdev(adev), "afs_cos_isr %08x", cr);
1078 
1079  return IRQ_HANDLED;
1080 }
1081 
1082 
1083 
1084 static int hook_interrupts(struct AFHBA_DEV* adev)
1085 {
1086  int rc = pci_enable_msi(adev->pci_dev);
1087  if (rc < 0){
1088  dev_err(pdev(adev), "pci_enable_msi_exact(%d) FAILED", 1);
1089  return rc;
1090  }
1091  rc = request_irq(adev->pci_dev->irq, afs_cos_isr, IRQF_SHARED, "afhba", adev);
1092  if (rc < 0) {
1093  pr_warn("afhba.%d: request_irq =%d failed!\n",
1094  adev->idx, adev->pci_dev->irq);
1095  pci_disable_msi(adev->pci_dev);
1096  return rc;
1097  }
1098  return 0;
1099 }
1100 
1101 
1102 static void smooth(unsigned *rate, unsigned *old, unsigned *new)
1103 {
1104 #define RATE *rate
1105 #define OLD *old
1106 #define NEW *new
1107 
1108  if (likely(NEW > OLD)){
1109  RATE = (SMOO*RATE + (10-SMOO)*(NEW-OLD))/10;
1110  }else{
1111  RATE = 0;
1112  }
1113  OLD = NEW;
1114 #undef NEW
1115 #undef OLD
1116 #undef RATE
1117 }
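/*
 * smooth() is an exponential moving average over per-second deltas.  With
 * the default SMOO=7:
 *
 *   RATE' = (7*RATE + 3*(NEW-OLD))/10
 *
 * e.g. RATE=50, OLD=1000, NEW=1060  ->  RATE' = (350 + 180)/10 = 53.
 * NEW <= OLD resets the rate to 0.
 */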
1118 
1119 
1120 static int as_mon(void *arg)
1121 {
1122  struct AFHBA_DEV* adev = (struct AFHBA_DEV*)arg;
1123  wait_queue_head_t waitq;
1124 
1125  init_waitqueue_head(&waitq);
1126 
1127  while(!kthread_should_stop()){
1128  struct JOB *job = &adev->stream_dev->job;
1129  wait_event_interruptible_timeout(waitq, 0, HZ);
1130 
1131  smooth(&job->rx_rate,
1132  &job->rx_buffers_previous, &job->buffers_received);
1133 
1134  smooth(&job->int_rate, &job->int_previous, &job->ints);
1135  }
1136 
1137  return 0;
1138 }
1139 
1140 
1141 static void check_fifo_status(struct AFHBA_DEV* adev)
1142 {
1143 #ifdef TODOLATER
1144  u32 desc_sta = DMA_PUSH_DESC_STA_RD(adev);
1145  u32 desc_flags = check_fifo_xxxx(tdev->desc_fifo_histo, desc_sta);
1146  u32 data_sta = rtd_read_reg(tdev, RTMT_C_DATA_FIFSTA);
1147  u32 data_flags = check_fifo_xxxx(tdev->data_fifo_histo, data_sta);
1148 
1149  if ((data_flags & RTMT_H_XX_DMA_FIFSTA_FULL) &&
1150  tdev->job.errors < 10){
1152  err("GAME OVER: %d FIFSTA_DATA_OVERFLOW: 0x%08x",
1153  tdev->idx, data_sta);
1154  if (++tdev->job.errors == 10){
1155  err("too many errors, turning reporting off ..");
1156  }
1157  }
1158  if ((desc_flags & RTMT_H_XX_DMA_FIFSTA_FULL) != 0 &&
1159  tdev->job.errors < 10){
1160  err("GAME OVER: %d FIFSTA_DESC_OVERFLOW: 0x%08x",
1161  tdev->idx, desc_sta);
1162  if (++tdev->job.errors == 10){
1163  err("too many errors, turning reporting off ..");
1164  }
1165  }
1166 #endif
1167 }
1168 
1169 int job_is_go(struct JOB* job)
1170 {
1171  return !job->please_stop && job->buffers_queued < job->buffers_demand;
1172 }
1173 
1174 
1175 int load_buffers(struct AFHBA_DEV* adev)
1176 /* return 0 on success */
1177 {
1178  struct AFHBA_STREAM_DEV* sdev = adev->stream_dev;
1179 
1180  queue_free_buffers(adev);
1181  if (dma_descriptor_ram > 1 && !sdev->job.dma_started){
1182  return validate_dma_descriptor_ram(adev, DMA_PUSH_DESC_RAM,
1183  sdev->push_ram_cursor, 1);
1184  }else{
1185  queue_free_buffers(adev);
1186  return 0;
1187  }
1188 }
1189 
1190 int start_job(struct AFHBA_DEV* adev)
1191 /* returns 0 on success */
1192 {
1193  int retry = 0;
1194  int load_ok = 0;
1195 
1196  afs_stop_dma(adev, DMA_PUSH_SEL); /* belt+braces */
1197  afs_configure_streaming_dma(adev, DMA_PUSH_SEL);
1198 
1199  while(retry++ < max_dma_load_retry){
1200  if (load_buffers(adev) == 0){
1201  load_ok = 1;
1202  break;
1203  }
1204  }
1205  if (!load_ok){
1206  dev_err(pdev(adev), "ERROR dma load retry count exceeded");
1207  return 1;
1208  }
1209  afs_start_dma(adev, DMA_PUSH_SEL);
1210 
1211  spin_lock(&adev->stream_dev->job_lock);
1212  adev->stream_dev->job.dma_started = 1;
1213  spin_unlock(&adev->stream_dev->job_lock);
1214  return 0;
1215 }
1216 
1217 static int afs_isr_work(void *arg)
1218 {
1219  struct AFHBA_DEV* adev = (struct AFHBA_DEV*)arg;
1220  struct AFHBA_STREAM_DEV* sdev = adev->stream_dev;
1221  struct JOB* job = &sdev->job;
1222 
1223  int loop_count = 0;
1224 /* this is going to be the top RT process */
1225  struct sched_param param = { .sched_priority = 10 };
1226  int please_check_fifo = 0;
1227  int job_is_go_but_aurora_is_down = 0;
1228  unsigned loop_jiffies = 0;
1229  unsigned last_amon_jiffies = 0;
1230 
1231  sched_setscheduler(current, SCHED_FIFO, &param);
1232  afs_comms_init(adev);
1233 
1234  for ( ; !kthread_should_stop(); ++loop_count, loop_jiffies += WORK_TO){
1235  int timeout = wait_event_interruptible_timeout(
1236  sdev->work.w_waitq,
1237  test_and_clear_bit(WORK_REQUEST, &sdev->work.w_to_do),
1238  WORK_TO) == 0;
1239 
1240  if (!timeout || loop_count%10 == 0){
1241  dev_dbg(pdev(adev), "TIMEOUT? %d queue_free_buffers() ? %d",
1242  timeout, job_is_go(job) );
1243  }
1244 
1245  if (loop_jiffies - last_amon_jiffies > amon_jiffies){
1246  if (aurora_monitor && !afs_comms_init(adev) &&
1247  job_is_go(job) && !job_is_go_but_aurora_is_down){
1248  dev_warn(pdev(adev), "job is go but aurora is down");
1249  job_is_go_but_aurora_is_down = 1;
1250  }else{
1251  job_is_go_but_aurora_is_down = 0;
1252  }
1253  last_amon_jiffies = loop_jiffies;
1254  }
1255 
1256  if (job_is_go(job)){
1257  if (!job->dma_started){
1258  if (start_job(adev) != 0){
1259  job->please_stop = PS_PLEASE_STOP;
1260  }
1261  }else{
1262  queue_free_buffers(adev);
1263  }
1264  }
1265 
1266  if (job->buffers_demand > 0 && queue_full_buffers(adev) > 0){
1267  wake_up_interruptible(&sdev->return_waitq);
1268  }
1269 
1270  spin_lock(&sdev->job_lock);
1271 
1272  if (sdev->job.on_pull_dma_timeout){
1273  sdev->job.on_pull_dma_timeout(adev);
1274  }
1275  if (sdev->job.on_push_dma_timeout){
1276  sdev->job.on_push_dma_timeout(adev);
1277  }
1278 
1279  switch(job->please_stop){
1280  case PS_STOP_DONE:
1281  break;
1282  case PS_PLEASE_STOP:
1283  afs_stop_dma(adev, DMA_PUSH_SEL);
1284  job->please_stop = PS_STOP_DONE;
1285  job->dma_started = 0;
1286  if (dma_descriptor_ram > 1){
1287  validate_dma_descriptor_ram(
1288  adev, DMA_PUSH_DESC_RAM, sdev->push_ram_cursor, 99);
1289  }
1290  break;
1291 /*
1292  default:
1293  please_check_fifo = job_is_go(job) &&
1294  afs_dma_started(adev, DMA_PUSH_SEL);
1295 */
1296  }
1297  spin_unlock(&sdev->job_lock);
1298 
1299  if (please_check_fifo){
1300  check_fifo_status(adev);
1301  please_check_fifo = 0;
1302  }
1303  }
1304 
1305  return 0;
1306 }
1307 
1308 
1309 static void startWork(struct AFHBA_DEV *adev)
1310 {
1311  struct AFHBA_STREAM_DEV *sdev = adev->stream_dev;
1312  sdev->work.w_task = kthread_run(afs_isr_work, adev, adev->name);
1313  sdev->work.mon_task = kthread_run(as_mon, adev, adev->mon_name);
1314 }
1315 
1316 static void stopWork(struct AFHBA_DEV *adev)
1317 {
1318  struct AFHBA_STREAM_DEV *sdev = adev->stream_dev;
1319  if (sdev->work.w_task){
1320  kthread_stop(sdev->work.w_task);
1321  }
1322  if (sdev->work.mon_task){
1323  kthread_stop(sdev->work.mon_task);
1324  }
1325 }
1326 
1327 static ssize_t afs_histo_read(
1328  struct file *file, char *buf, size_t count, loff_t *f_pos)
1329 {
1330  unsigned *the_histo = PD(file)->private;
1331  int maxentries = PD(file)->private2;
1332  unsigned cursor = *f_pos; /* f_pos counts in entries */
1333  int rc;
1334 
1335  if (cursor >= maxentries){
1336  return 0;
1337  }else{
1338  int headroom = (maxentries - cursor) * sizeof(unsigned);
1339  if (count > headroom){
1340  count = headroom;
1341  }
1342  }
1343  rc = copy_to_user(buf, the_histo+cursor, count);
1344  if (rc){
1345  return -1;
1346  }
1347 
1348  *f_pos += count/sizeof(unsigned);
1349  return count;
1350 }
1351 
1352 static struct file_operations afs_fops_histo = {
1353  .read = afs_histo_read,
1354  .release = afhba_release
1355 };
1356 
1357 
1358 static int rtm_t_start_stream(struct AFHBA_DEV *adev, unsigned buffers_demand)
1359 {
1360  struct AFHBA_STREAM_DEV *sdev = adev->stream_dev;
1361  struct JOB *job = &sdev->job;
1362 
1363  dev_dbg(pdev(adev), "01");
1364  afs_dma_reset(adev, DMA_PUSH_SEL);
1365  memset(job, 0, sizeof(struct JOB));
1366 
1367  job->buffers_demand = buffers_demand;
1368  if (unlikely(list_empty_careful(&sdev->bp_empties.list))){
1369  dev_err(pdev(adev), "no free buffers");
1370  return -ERESTARTSYS;
1371  }
1372 
1373  spin_lock(&sdev->job_lock);
1374  job->please_stop = PS_OFF;
1375  spin_unlock(&sdev->job_lock);
1376  set_bit(WORK_REQUEST, &sdev->work.w_to_do);
1377  wake_up_interruptible(&sdev->work.w_waitq);
1378  dev_dbg(pdev(adev), "99");
1379  return 0;
1380 }
1381 
1382 int afs_histo_open(struct inode *inode, struct file *file, unsigned *histo, int hcount)
1383 {
1384  file->f_op = &afs_fops_histo;
1385  PD(file)->private = histo;
1386  PD(file)->private2 = hcount;
1387  return 0;
1388 }
1389 
1390 int afs_reset_buffers(struct AFHBA_DEV *adev)
1391 /* handle with care! */
1392 {
1393  struct AFHBA_STREAM_DEV *sdev = adev->stream_dev;
1394  struct HostBuffer *hb = sdev->hbx;
1395  int ii;
1396 
1397  if (mutex_lock_interruptible(&sdev->list_mutex)){
1398  return -1;
1399  }
1400  INIT_LIST_HEAD(&sdev->bp_empties.list);
1401  INIT_LIST_HEAD(&sdev->bp_filling.list);
1402  INIT_LIST_HEAD(&sdev->bp_full.list);
1403 
1404 
1405 
1406  for (ii = 0; ii < nbuffers; ++ii, ++hb){
1407  hb->bstate = BS_EMPTY;
1408  list_add_tail(&hb->list, &sdev->bp_empties.list);
1409  }
1410 
1411  sdev->init_descriptors(sdev);
1412 
1413 
1414 
1415  memset(sdev->data_fifo_histo, 0, DATA_FIFO_SZ);
1416  memset(sdev->desc_fifo_histo, 0, DESC_FIFO_SZ);
1417 
1418  mutex_unlock(&sdev->list_mutex);
1419  return 0;
1420 }
1421 
1422 
1423 void afs_stop_llc_push(struct AFHBA_DEV *adev)
1424 {
1425  DEV_DBG(pdev(adev), "afs_stop_llc_push()");
1426  DEV_DBG(pdev(adev), "afs_dma_set_recycle(0)");
1427  msleep(1);
1428  afs_dma_set_recycle(adev, DMA_PUSH_SEL, 0);
1429  afs_dma_reset(adev, DMA_PUSH_SEL);
1430 }
1431 
1432 void afs_stop_llc_pull(struct AFHBA_DEV *adev)
1433 {
1434  dev_info(pdev(adev), "afs_stop_llc_pull()");
1435  afs_dma_reset(adev, DMA_PULL_SEL);
1436 }
1437 
1438 void afs_stop_stream_push(struct AFHBA_DEV *adev)
1439 {
1440  dev_info(pdev(adev), "afs_stop_stream_push()");
1441  afs_dma_reset(adev, DMA_PUSH_SEL);
1442 }
1443 
1444 void afs_stop_stream_pull(struct AFHBA_DEV *adev)
1445 {
1446  dev_info(pdev(adev), "afs_stop_stream_pull()");
1447  afs_dma_reset(adev, DMA_PULL_SEL);
1448 }
1449 
1450 int push_dma_timeout(struct AFHBA_DEV *adev)
1451 /* called with job_lock ON */
1452 {
1453  struct AFHBA_STREAM_DEV *sdev = adev->stream_dev;
1454  u32 dma_ctrl = DMA_CTRL_RD(adev);
1455  int action = 0;
1456 
1457  action = sdev->job.on_push_dma_timeout != 0 &&
1458  (dma_ctrl&DMA_CTRL_EN) == 0;
1459  if (action){
1460  struct XLLC_DEF* xllc_def = &sdev->job.push_llc_def;
1461  dev_err(pdev(adev), "DMA_CTRL_EN NOT SET attempt restart");
1462  afs_dma_reset(adev, DMA_PUSH_SEL);
1463  afs_load_llc_single_dma(adev, DMA_PUSH_SEL, xllc_def->pa, xllc_def->len);
1464  sdev->push_dma_timeouts++;
1465  }
1466  return 0;
1467 }
1468 long afs_start_ai_llc(struct AFHBA_DEV *adev, struct XLLC_DEF* xllc_def)
1469 {
1470  struct AFHBA_STREAM_DEV *sdev = adev->stream_dev;
1471  struct JOB* job = &sdev->job;
1472 
1473  spin_lock(&sdev->job_lock);
1474  job->please_stop = PS_OFF;
1475  spin_unlock(&sdev->job_lock);
1476  sdev->onStopPush = afs_stop_llc_push;
1477 
1478  if (xllc_def->pa == RTM_T_USE_HOSTBUF){
1479  xllc_def->pa = sdev->hbx[0].pa;
1480  }
1481  afs_dma_reset(adev, DMA_PUSH_SEL);
1482  afs_load_llc_single_dma(adev, DMA_PUSH_SEL, xllc_def->pa, xllc_def->len);
1483  spin_lock(&sdev->job_lock);
1484  job->please_stop = PS_OFF;
1485  job->on_push_dma_timeout = push_dma_timeout;
1486  job->push_llc_def = *xllc_def;
1487  spin_unlock(&sdev->job_lock);
1488  return 0;
1489 }
1490 long afs_start_ao_llc(struct AFHBA_DEV *adev, struct XLLC_DEF* xllc_def)
1491 {
1492  struct AFHBA_STREAM_DEV *sdev = adev->stream_dev;
1493 
1494  sdev->job.please_stop = PS_OFF;
1495  sdev->onStopPull = afs_stop_llc_pull;
1496 
1497  if (xllc_def->pa == RTM_T_USE_HOSTBUF){
1498  xllc_def->pa = sdev->hbx[0].pa;
1499  }
1500  afs_dma_reset(adev, DMA_PULL_SEL);
1501  afs_load_llc_single_dma(adev, DMA_PULL_SEL, xllc_def->pa, xllc_def->len);
1502  return 0;
1503 }
1504 
1505 int afs_dma_open(struct inode *inode, struct file *file)
1506 {
1507  struct AFHBA_DEV *adev = PD(file)->dev;
1508  struct AFHBA_STREAM_DEV *sdev = adev->stream_dev;
1509 
1510  int ii;
1511 
1512  dev_dbg(pdev(adev), "45: DMA open");
1513 
1515  if (sdev->pid == 0){
1516  sdev->pid = current->pid;
1517  }
1518 
1519  if (sdev->pid != current->pid){
1520  return -EBUSY;
1521  }
1522 
1523  sdev->shot++;
1524 
1525  if (sdev->buffer_len == 0) sdev->buffer_len = buffer_len;
1526  sdev->req_len = min(sdev->buffer_len, buffer_len);
1527 
1528  for (ii = 0; ii != nbuffers; ++ii){
1529  sdev->hbx[ii].req_len = sdev->req_len;
1530  }
1531  if (afs_reset_buffers(adev)){
1532  return -ERESTARTSYS;
1533  }
1534 
1535  if ((file->f_flags & O_NONBLOCK) != 0){
1536  file->f_op = &afs_fops_dma_poll;
1537  }else{
1538  file->f_op = &afs_fops_dma;
1539  }
1540 
1541  dev_dbg(pdev(adev), "99");
1542  return 0;
1543 }
1544 
1545 int afs_dma_release(struct inode *inode, struct file *file)
1546 {
1547  struct AFHBA_DEV *adev = PD(file)->dev;
1548  struct AFHBA_STREAM_DEV *sdev = adev->stream_dev;
1549 
1550  struct HostBuffer *hb;
1551  struct HostBuffer *tmp;
1552 
1553  dev_dbg(pdev(adev), "afs_dma_release() 01 %s %d %p<-%p->%p",
1554  adev->name, PD(file)->minor,
1555  PD(file)->my_buffers.prev,
1556  &PD(file)->my_buffers,
1557  PD(file)->my_buffers.next);
1558 
1559  if (mutex_lock_interruptible(&sdev->list_mutex)){
1560  return -ERESTARTSYS;
1561  }
1562  list_for_each_entry_safe(hb, tmp, &PD(file)->my_buffers, list){
1563  dev_dbg(pdev(adev), "returning %d", hb->ibuf);
1564  return_empty(adev, hb);
1565  }
1566 
1567  mutex_unlock(&sdev->list_mutex);
1568 
1569  dev_dbg(pdev(adev), "90");
1570  spin_lock(&sdev->job_lock);
1571  sdev->job.please_stop = PS_PLEASE_STOP;
1572  sdev->job.on_push_dma_timeout = 0;
1573  sdev->job.on_pull_dma_timeout = 0;
1574  sdev->job.buffers_demand = 0;
1575  spin_unlock(&sdev->job_lock);
1576 
1577 
1578  if (sdev->onStopPull){
1579  sdev->onStopPull(adev);
1580  sdev->onStopPull = 0;
1581  }
1582  if (sdev->onStopPush){
1583  sdev->onStopPush(adev);
1584  sdev->onStopPush = 0;
1585  }
1586  sdev->pid = 0;
1587  if (sdev->user) kfree(sdev->user);
1588 
1589  return afhba_release(inode, file);
1590 }
1591 
1592 ssize_t afs_dma_read(
1593  struct file *file, char __user *buf, size_t count, loff_t *f_pos)
1594 /* returns when buffer[s] available
1595  * data is buffer index as array of unsigned
1596  * return len is sizeof(array)
1597  */
1598 {
1599  struct AFHBA_DEV *adev = PD(file)->dev;
1600  struct AFHBA_STREAM_DEV *sdev = adev->stream_dev;
1601  struct JOB *job = &sdev->job;
1602  int rc;
1603 
1604  dev_dbg(pdev(adev), "01 ps %u count %ld demand %d received %d waiting %d",
1605  (unsigned)*f_pos, (long)count,
1606  job->buffers_demand, job->buffers_received,
1607  !list_empty(&sdev->bp_full.list));
1608 
1609  if (job->buffers_received >= job->buffers_demand &&
1610  list_empty(&sdev->bp_full.list) ){
1611  dev_dbg(pdev(adev), "job done");
1612  return 0;
1613  }
1614 
1615  if (sdev->onStopPush == 0){
1617  }
1618 
1619  if (*f_pos == 0){
1620  rc = wait_event_interruptible(
1621  sdev->return_waitq, !list_empty(&sdev->bp_full.list));
1622  }else{
1623  rc = wait_event_interruptible_timeout(
1624  sdev->return_waitq,
1625  !list_empty(&sdev->bp_full.list), RX_TO);
1626  }
1627 
1628  dev_dbg(pdev(adev), "done waiting, rc %d", rc);
1629 
1630  if (rc < 0){
1631  dev_dbg(pdev(adev), "RESTART");
1632  return -ERESTARTSYS;
1633  }else if (mutex_lock_interruptible(&sdev->list_mutex)){
1634  return -ERESTARTSYS;
1635  }else{
1636  struct HostBuffer *hb;
1637  struct HostBuffer *tmp;
1638  int nbytes = 0;
1639  int backlog = 0;
1640  struct StreamBufferDef sbd;
1641 
1642  list_for_each_entry_safe(hb, tmp, &sdev->bp_full.list, list){
1643  if (nbytes+SBDSZ > count){
1644  dev_dbg(pdev(adev), "quit nbytes %d count %lu",
1645  nbytes, (long)count);
1646  break;
1647  }
1648 
1649  sbd.ibuf = IBUF_MAGIC|(backlog<<IBUF_IDX_SHL)|hb->ibuf;
1650  sbd.esta = hb->esta;
1651 
1652  if (copy_to_user(buf+nbytes, &sbd, SBDSZ)){
1653  rc = -EFAULT;
1654  goto read99;
1655  }
1656  dev_dbg(pdev(adev), "add my_buffers %d", hb->ibuf);
1657 
1658  list_move_tail(&hb->list, &PD(file)->my_buffers);
1659  hb->bstate = BS_FULL_APP;
1660  nbytes += SBDSZ;
1661  backlog++;
1662  }
1663 
1664  if (rc == 0 && nbytes == 0){
1665  dev_dbg(pdev(adev), "TIMEOUT");
1666  rc = -ETIMEDOUT;
1667  }else{
1668  *f_pos += nbytes;
1669  dev_dbg(pdev(adev), "return %d", nbytes);
1670  rc = nbytes;
1671  }
1672  }
1673 read99:
1674  mutex_unlock(&sdev->list_mutex);
1675  return rc;
1676 }
1677 
1678 static unsigned int afs_dma_poll(struct file* file, poll_table *poll_table)
1679 {
1680  struct AFHBA_DEV *adev = PD(file)->dev;
1681  struct AFHBA_STREAM_DEV *sdev = adev->stream_dev;
1682  unsigned int mask = POLLOUT | POLLWRNORM;
1683  if (!list_empty(&sdev->bp_full.list)){
1684  mask |= POLLIN | POLLRDNORM;
1685  }else{
1686  poll_wait(file, &sdev->return_waitq, poll_table);
1687  if (!list_empty(&sdev->bp_full.list)){
1688  mask |= POLLIN | POLLRDNORM;
1689  }
1690  }
1691  return mask;
1692 }
1693 
1694 ssize_t afs_dma_read_poll(
1695  struct file *file, char __user *buf, size_t count, loff_t *f_pos)
1696 /* returns when buffer[s] available
1697  * data is buffer index as array of unsigned
1698  * return len is sizeof(array)
1699  */
1700 {
1701  struct AFHBA_DEV *adev = PD(file)->dev;
1702  struct AFHBA_STREAM_DEV *sdev = adev->stream_dev;
1703  struct JOB *job = &sdev->job;
1704 
1705  int rc = 0;
1706  struct HostBuffer *hb;
1707  struct HostBuffer *tmp;
1708  int nbytes = 0;
1709 
1710  dev_dbg(pdev(adev), "01 ps %u count %ld demand %d received %d waiting %d",
1711  (unsigned)*f_pos, (long)count,
1712  job->buffers_demand, job->buffers_received,
1713  !list_empty(&sdev->bp_full.list) );
1714 
1715  if (job->buffers_received >= job->buffers_demand &&
1716  list_empty(&sdev->bp_full.list) ){
1717  dev_dbg(pdev(adev), "job done");
1718  return 0;
1719  }
1720 
1721  if (!afs_dma_started(adev, DMA_PUSH_SEL)){
1722  afs_start_dma(adev, DMA_PUSH_SEL);
1723  }
1724  if (queue_full_buffers(adev)){
1725  list_for_each_entry_safe(hb, tmp, &sdev->bp_full.list, list){
1726  if (nbytes+sizeof(int) > count){
1727  dev_dbg(pdev(adev), "quit nbytes %d count %lu",
1728  nbytes, (long)count);
1729  break;
1730  }
1731 
1732  if (copy_to_user(buf+nbytes, &hb->ibuf, sizeof(int))){
1733  rc = -EFAULT;
1734  goto read99;
1735  }
1736  dev_dbg(pdev(adev), "add my_buffers %d", hb->ibuf);
1737 
1738  list_move_tail(&hb->list, &PD(file)->my_buffers);
1739  hb->bstate = BS_FULL_APP;
1740  nbytes += sizeof(int);
1741  }
1742 
1743  if (rc == 0 && nbytes == 0){
1744  dev_dbg(pdev(adev), "TIMEOUT");
1745  rc = -ETIMEDOUT;
1746  }else{
1747  *f_pos += nbytes;
1748  dev_dbg(pdev(adev), "return %d", nbytes);
1749  rc = nbytes;
1750  }
1751  }
1752 read99:
1753  return rc;
1754 }
1755 
1756 
1757 
1758 
1759 ssize_t afs_dma_write(
1760  struct file *file, const char *buf, size_t count, loff_t *f_pos)
1761 /* write completed data.
1762  * data is array of full buffer id's
1763  * id's are removed from full and placed onto empty.
1764  */
1765 {
1766  struct AFHBA_DEV *adev = PD(file)->dev;
1767  struct AFHBA_STREAM_DEV *sdev = adev->stream_dev;
1768 
1769  int nbytes = 0;
1770  int rc = 0;
1771 
1772  dev_dbg(pdev(adev), "pos %u count %lu", (unsigned)*f_pos, (long)count);
1773 
1774  if (mutex_lock_interruptible(&sdev->list_mutex)){
1775  return -ERESTARTSYS;
1776  }
1777  while (nbytes+sizeof(int) <= count){
1778  int id;
1779 
1780  if (copy_from_user(&id, buf+nbytes, sizeof(int))){
1781  return -EFAULT;
1782  }
1783  dev_dbg(pdev(adev), "[%u] recycle buffer %d",
1784  (unsigned)(nbytes/sizeof(int)), id);
1785 
1786  if (id < 0){
1787  dev_err(pdev(adev), "ID < 0");
1788  rc = -100;
1789  goto write99;
1790  }else if (id >= nbuffers){
1791  dev_err(pdev(adev), "ID > NBUFFERS");
1792  rc = -101;
1793  goto write99;
1794  }else if (sdev->hbx[id].bstate != BS_FULL_APP){
1795  dev_err(pdev(adev), "STATE != BS_FULL_APP %d",
1796  sdev->hbx[id].bstate);
1797  rc = -102;
1798  goto write99;
1799  }else{
1800  struct HostBuffer *hb;
1801 
1802  rc = -1;
1803 
1804  list_for_each_entry(
1805  hb, &PD(file)->my_buffers, list){
1806 
1807  dev_dbg(pdev(adev), "listing %d", hb->ibuf);
1808  assert(hb != 0);
1809  assert(hb->ibuf >= 0 && hb->ibuf < nbuffers);
1810  if (hb->ibuf == id){
1811  return_empty(adev, hb);
1812  nbytes += sizeof(int);
1813  rc = 0;
1814  break;
1815  }
1816  }
1817  if (rc == -1){
1818  dev_err(pdev(adev), "ATTEMPT TO RET BUFFER NOT MINE");
1819  goto write99;
1820  }
1821  }
1822  }
1823 
1824  *f_pos += nbytes;
1825  rc = nbytes;
1826 
1827 write99:
1828  mutex_unlock(&sdev->list_mutex);
1829  dev_dbg(pdev(adev), "99 return %d", rc);
1830  return rc;
1831 }
1832 
1833 
1834 int fix_dma_buff_size(struct AB *ab, struct XLLC_DEF *xdef)
1835 /* descriptors are power of 2 * 1K .. attempt to fit in 2 descs.. */
1836 {
1837  int nblocks = ab->buffers[0].len/1024;
1838  int ii;
1839  for (ii = 0; ii < 16; ++ii){
1840  if (1<<ii > nblocks){
1841  int len1 = (1<<(ii-1))*1024;
1842  xdef[0] = ab->buffers[0];
1843  xdef[1].pa = xdef[0].pa + len1;
1844  xdef[1].len = xdef[0].len - len1;
1845  xdef[0].len = len1;
1846  xdef[2] = ab->buffers[1];
1847  xdef[3].pa = xdef[2].pa + len1;
1848  xdef[3].len = xdef[2].len - len1;
1849  xdef[2].len = len1;
1850  return 4;
1851  }else if (1<<ii == nblocks){
1852  xdef[0] = ab->buffers[0];
1853  xdef[1] = ab->buffers[1];
1854  return 2;
1855  }
1856  }
1857  printk("ERROR: fix_dma_descriptors BUFFER TOO LONG");
1858  return 0;
1859 }
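/*
 * Worked example for fix_dma_buff_size(): with ab->buffers[0].len = 96 KiB,
 * nblocks = 96.  The loop first hits 1<<7 = 128 > 96, so len1 = (1<<6)*1024 =
 * 64 KiB and each of the two buffers is split 64 KiB + 32 KiB, returning 4
 * descriptors.  A length that is already a power of two (e.g. 64 KiB) matches
 * the 1<<ii == nblocks case and the two buffers are returned unsplit.
 */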
1860 long afs_start_AI_AB(struct AFHBA_DEV *adev, struct AB *ab)
1861 {
1862  struct AFHBA_STREAM_DEV *sdev = adev->stream_dev;
1863  struct JOB* job = &sdev->job;
1864  struct XLLC_DEF xdef[4];
1865  int nb;
1866 
1867  spin_lock(&sdev->job_lock);
1868  job->please_stop = PS_OFF;
1869  spin_unlock(&sdev->job_lock);
1870  sdev->onStopPush = afs_stop_llc_push;
1871 
1872  if (ab->buffers[0].pa == RTM_T_USE_HOSTBUF){
1873  ab->buffers[0].pa = sdev->hbx[0].pa;
1874  }
1875  if (ab->buffers[1].pa == RTM_T_USE_HOSTBUF){
1876  ab->buffers[1].pa = sdev->hbx[1].pa;
1877  }else if (ab->buffers[1].pa < buffer_len){
1878  /* pa is an offset in buffer 0 */
1879  ab->buffers[1].pa += ab->buffers[0].pa;
1880  }
1881  afs_dma_reset(adev, DMA_PUSH_SEL);
1882  if (use_llc_multi){
1883  afs_load_dram_descriptors_ll(adev, DMA_PUSH_SEL, ab->buffers, 2);
1884  }else{
1885  nb = fix_dma_buff_size(ab, xdef);
1886  afs_load_dram_descriptors(adev, DMA_PUSH_SEL, xdef, nb);
1887  }
1888  spin_lock(&sdev->job_lock);
1889  job->please_stop = PS_OFF;
1890  job->on_push_dma_timeout = 0;
1891  spin_unlock(&sdev->job_lock);
1892  return 0;
1893 }
1894 
1895 
1896 long afs_start_ABN(struct AFHBA_DEV *adev, struct ABN *abn, enum DMA_SEL dma_sel)
1897 {
1898  struct AFHBA_STREAM_DEV *sdev = adev->stream_dev;
1899  struct JOB* job = &sdev->job;
1900  int ib;
1901 
1902  spin_lock(&sdev->job_lock);
1903  job->please_stop = PS_OFF;
1904  spin_unlock(&sdev->job_lock);
1905  if(dma_sel&DMA_PUSH_SEL){
1906  sdev->onStopPush = afs_stop_llc_push;
1907  }
1908  if (dma_sel&DMA_PULL_SEL){
1909  sdev->onStopPull = afs_stop_llc_pull;
1910  }
1911 
1912  dev_dbg(pdev(adev), "%s descriptors:%d %s", __FUNCTION__, abn->ndesc,
1913  abn->buffers[0].pa == RTM_T_USE_HOSTBUF? "RTM_T_USE_HOSTBUF": "USER_ADDR");
1914 
1915  if (abn->buffers[0].pa == RTM_T_USE_HOSTBUF){
1916  abn->buffers[0].pa = sdev->hbx[0].pa;
1917 
1918  for (ib = 1; ib < abn->ndesc; ++ib){
1919  abn->buffers[ib].pa = abn->buffers[ib-1].pa + PAGE_SIZE;
1920  }
1921  }
1922 
1923  for (ib = 0; ib < abn->ndesc; ++ib){
1924  dev_dbg(pdev(adev), "%s [%2d] pa:%08x len:%d",
1925  __FUNCTION__, ib, abn->buffers[ib].pa, abn->buffers[ib].len);
1926  }
1927 
1928  afs_load_dram_descriptors_ll(adev, dma_sel, abn->buffers, abn->ndesc);
1929 
1930  spin_lock(&sdev->job_lock);
1931  job->please_stop = PS_OFF;
1932 
1933  if(dma_sel&DMA_PUSH_SEL){
1934  job->on_push_dma_timeout = 0;
1935  }
1936  if (dma_sel&DMA_PULL_SEL){
1937  job->on_pull_dma_timeout = 0;
1938  }
1939  spin_unlock(&sdev->job_lock);
1940  return 0;
1941 }
1942 
1943 
1944 long afs_dma_ioctl(struct file *file,
1945  unsigned int cmd, unsigned long arg)
1946 {
1947  struct AFHBA_DEV *adev = PD(file)->dev;
1948  struct AFHBA_STREAM_DEV *sdev = adev->stream_dev;
1949  void* varg = (void*)arg;
1950 
1951 
1952  switch(cmd){
1953  case RTM_T_START_STREAM:
1954  return rtm_t_start_stream(adev, transfer_buffers);
1955  case RTM_T_START_STREAM_MAX: {
1956  u32 my_transfer_buffers;
1957  COPY_FROM_USER(&my_transfer_buffers, varg, sizeof(u32));
1958  return rtm_t_start_stream(adev, my_transfer_buffers);
1959  }
1960  case AFHBA_START_AI_LLC : {
1961  struct XLLC_DEF xllc_def;
1962  long rc;
1963  COPY_FROM_USER(&xllc_def, varg, sizeof(struct XLLC_DEF));
1964  rc = afs_start_ai_llc(adev, &xllc_def);
1965  COPY_TO_USER(varg, &xllc_def, sizeof(struct XLLC_DEF));
1966  return rc;
1967  }
1968  case AFHBA_START_AO_LLC : {
1969  struct XLLC_DEF xllc_def;
1970  long rc;
1971  COPY_FROM_USER(&xllc_def, varg, sizeof(struct XLLC_DEF));
1972  rc = afs_start_ao_llc(adev, &xllc_def);
1973  COPY_TO_USER(varg, &xllc_def, sizeof(struct XLLC_DEF));
1974  return rc;
1975  }
1976  case AFHBA_START_AI_AB: {
1977  struct AB ab;
1978  long rc;
1979  COPY_FROM_USER(&ab, varg, sizeof(struct AB));
1980  rc = afs_start_AI_AB(adev, &ab);
1981  COPY_TO_USER(varg, &ab, sizeof(struct AB));
1982  return rc;
1983  }
1984  case AFHBA_START_AI_ABN:
1985  case AFHBA_START_AO_ABN:
1986  {
1987  struct ABN *abn = kmalloc(sizeof(struct ABN), GFP_KERNEL);
1988  long rc;
1989  COPY_FROM_USER(abn, varg, sizeof(struct ABN));
1990  if (cmd == AFHBA_START_AI_ABN){
1991  rc = afs_start_ABN(adev, abn, DMA_PUSH_SEL);
1992  }else{
1993  rc = afs_start_ABN(adev, abn, DMA_PULL_SEL);
1994  }
1995  COPY_TO_USER(varg, abn, sizeof(struct ABN));
1996  return rc;
1997  }
1998  case AFHBA_AO_BURST_INIT:
1999  {
2000  if (sdev->user){
2001  dev_err(pdev(adev), "AFHBA_AO_BURST_INIT other user data active");
2002  return -EBUSY;
2003  }else{
2004  u32 dma_ctrl = DMA_CTRL_RD(adev);
2005  dma_ctrl |= dma_pp(DMA_PULL_SEL, DMA_CTRL_EN);
2006  sdev->pull_descr_ram = 0;
2008  afs_load_pull_descriptor(adev, 0);
2009  DMA_CTRL_WR(adev, dma_ctrl);
2010  return 0;
2011  }
2012  }
2013  case AFHBA_AO_BURST_SETBUF:
2014  {
2015  u32 srcix = arg;
2016  afs_load_pull_descriptor(adev, srcix);
2017  return 0;
2018  }
2019  default:
2020  return -ENOTTY;
2021  }
2022 
2023 }
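
/*
 * Illustrative userspace sketches (assumptions: the caller supplies an
 * already-open fd on the DMA device node, and rtm-t_ioctl.h with its
 * types is usable from userspace). They exercise two of the ioctl cases
 * above: RTM_T_START_STREAM[_MAX] for high-throughput streaming and
 * AFHBA_START_AI_LLC for low-latency control input.
 */
#include <string.h>
#include <sys/ioctl.h>
#include "rtm-t_ioctl.h"

/* start streaming; max_buffers == 0 means use the module default */
static int start_stream(int fd, unsigned max_buffers)
{
	if (max_buffers == 0){
		return ioctl(fd, RTM_T_START_STREAM);
	}else{
		return ioctl(fd, RTM_T_START_STREAM_MAX, &max_buffers);
	}
}

/* start LLC AI into driver host buffer 0; the driver rounds len up to
 * the next %64 and writes the PA it actually used back into xllc.pa
 */
static int start_ai_llc(int fd, unsigned len)
{
	struct XLLC_DEF xllc;

	memset(&xllc, 0, sizeof(xllc));
	xllc.pa = RTM_T_USE_HOSTBUF;
	xllc.len = len;
	return ioctl(fd, AFHBA_START_AI_LLC, &xllc);
}
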
2024 
2025 int afs_mmap_host(struct file* file, struct vm_area_struct* vma)
2026 /** mmap the host buffer.
2027  *
2028  */
2029 {
2030  struct AFHBA_DEV *adev = PD(file)->dev;
2031  struct AFHBA_STREAM_DEV *sdev = adev->stream_dev;
2032  int minor = PD(vma->vm_file)->minor;
2033 
2034  int ibuf = minor<=NBUFFERS_MASK? minor&NBUFFERS_MASK: 0;
2035  struct HostBuffer *hb = &sdev->hbx[ibuf];
2036  unsigned long vsize = vma->vm_end - vma->vm_start;
2037  unsigned long psize = hb->len;
2038  unsigned pfn = hb->pa >> PAGE_SHIFT;
2039 
2040  dev_dbg(pdev(adev), "%c vsize %lu psize %lu %s",
2041  'D', vsize, psize, vsize>psize? "EINVAL": "OK");
2042 
2043  if (vsize > psize){
2044  return -EINVAL; /* request too big */
2045  }
2046  if (remap_pfn_range(
2047  vma, vma->vm_start, pfn, vsize, vma->vm_page_prot)){
2048  return -EAGAIN;
2049  }else{
2050  return 0;
2051  }
2052 }
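
/*
 * Illustrative userspace sketch (device node naming is an assumption):
 * each host buffer is exposed as its own minor, and afs_mmap_host()
 * above rejects mappings longer than the buffer, so map at most
 * buffer_len bytes.
 */
#include <fcntl.h>
#include <stddef.h>
#include <sys/mman.h>
#include <unistd.h>

static void* map_host_buffer(const char* devnode, size_t len)
{
	int fd = open(devnode, O_RDONLY);
	void* va;

	if (fd < 0){
		return NULL;
	}
	va = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
	close(fd);                /* the mapping survives the close() */
	return va == MAP_FAILED? NULL: va;
}
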
2053 
2054 static struct file_operations afs_fops_dma = {
2055  .open = afs_dma_open,
2056  .release = afs_dma_release,
2057  .read = afs_dma_read,
2058  .poll = afs_dma_poll,
2059  .write = afs_dma_write,
2060  .unlocked_ioctl = afs_dma_ioctl,
2061  .mmap = afs_mmap_host
2062 };
2063 
2064 static struct file_operations afs_fops_dma_poll = {
2065  .open = afs_dma_open,
2066  .release = afs_dma_release,
2067  .read = afs_dma_read_poll,
2068  .poll = afs_dma_poll,
2069  .write = afs_dma_write,
2070  .unlocked_ioctl = afs_dma_ioctl,
2071  .mmap = afs_mmap_host
2072 };
2073 
2074 
2075 int afs_open(struct inode *inode, struct file *file)
2076 {
2077  struct AFHBA_DEV *adev = DEV(file);
2078 
2079  dev_dbg(pdev(adev), "01");
2080  if (adev == 0){
2081  return -ENODEV;
2082  }
2083  dev_dbg(pdev(adev), "33: minor %d", PD(file)->minor);
2084 
2085  switch((PD(file)->minor)){
2086  case MINOR_DMAREAD:
2087  return afs_dma_open(inode, file);
2088  case MINOR_DATA_FIFO:
2089  return afs_histo_open(
2090  inode, file,
2091  adev->stream_dev->data_fifo_histo, DATA_FIFO_SZ);
2092  case MINOR_DESC_FIFO:
2093  return afs_histo_open(
2094  inode, file,
2095  adev->stream_dev->desc_fifo_histo, DESC_FIFO_SZ);
2096  case MINOR_CATCHUP_HISTO:
2097  return afs_histo_open(
2098  inode, file,
2099  adev->stream_dev->job.catchup_histo, NBUFFERS);
2100  default:
2101  if (PD(file)->minor <= NBUFFERS_MASK){
2102  return 0;
2103  }else{
2104  dev_err(pdev(adev),"99 adev %p name %s", adev, adev->name);
2105  return -ENODEV;
2106  }
2107  }
2108 
2109 }
2110 
2111 static struct file_operations afs_fops = {
2112  .open = afs_open,
2113  .mmap = afs_mmap_host,
2114  .release = afhba_release,
2115 };
2116 
2117 static ssize_t show_zmod_id(
2118  struct device * dev,
2119  struct device_attribute *attr,
2120  char * buf)
2121 {
2122  struct AFHBA_DEV *adev = afhba_lookupDeviceFromClass(dev);
2123  return sprintf(buf, "0x%08x\n", _afs_read_zynqreg(adev, Z_MOD_ID));
2124 }
2125 
2126 static DEVICE_ATTR(z_mod_id, S_IRUGO, show_zmod_id, 0);
2127 
2128 static ssize_t show_z_ident(
2129  struct device * dev,
2130  struct device_attribute *attr,
2131  char * buf)
2132 {
2133  struct AFHBA_DEV *adev = afhba_lookupDeviceFromClass(dev);
2134  return sprintf(buf, "0x%08x\n", _afs_read_zynqreg(adev, Z_IDENT));
2135 }
2136 
2137 static DEVICE_ATTR(z_ident, S_IRUGO, show_z_ident, 0);
2138 
2139 static ssize_t show_acq_model(
2140  struct device * dev,
2141  struct device_attribute *attr,
2142  char * buf)
2143 {
2144  struct AFHBA_DEV *adev = afhba_lookupDeviceFromClass(dev);
2145  unsigned model = _afs_read_zynqreg(adev, Z_IDENT) >> 16;
2146  model = model&0x2106;
2147  return sprintf(buf, "acq%04x\n", model);
2148 }
2149 
2150 static DEVICE_ATTR(acq_model, S_IRUGO, show_acq_model, 0);
2151 
2152 static ssize_t show_acq_port(
2153  struct device * dev,
2154  struct device_attribute *attr,
2155  char * buf)
2156 {
2157  struct AFHBA_DEV *adev = afhba_lookupDeviceFromClass(dev);
2158  unsigned comms = _afs_read_zynqreg(adev, Z_IDENT) >> 20;
2159  comms = comms&0xf;
2160  return sprintf(buf, "%X\n", comms);
2161 }
2162 
2163 static DEVICE_ATTR(acq_port, S_IRUGO, show_acq_port, 0);
2164 
2165 static ssize_t show_acq_ident(
2166  struct device * dev,
2167  struct device_attribute *attr,
2168  char * buf)
2169 {
2170  struct AFHBA_DEV *adev = afhba_lookupDeviceFromClass(dev);
2171  unsigned z_ident = _afs_read_zynqreg(adev, Z_IDENT);
2172  unsigned ident = z_ident&0x0ffff;
2173  unsigned model = (z_ident >> 16) & 0x2106;
2174 
2175  return sprintf(buf, "acq%04x_%03d\n", model, ident);
2176 }
2177 
2178 static DEVICE_ATTR(acq_ident, S_IRUGO, show_acq_ident, 0);
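
/*
 * Worked example (the register value is an assumption; the decode just
 * mirrors the attribute code above): for z_ident = 0x21A6007B,
 *   model = (z_ident >> 16) & 0x2106  = 0x2106       -> "acq2106"
 *   port  = (z_ident >> 20) & 0xf     = 0xA          -> "A"
 *   ident =  z_ident        & 0x0ffff = 0x7b = 123   -> "acq2106_123"
 * i.e. the comms port nibble sits in bits 23:20 and is masked away when
 * the model number is formed.
 */
#include <stdio.h>

int main(void)
{
	unsigned z_ident = 0x21A6007Bu;
	unsigned model = (z_ident >> 16) & 0x2106;
	unsigned port  = (z_ident >> 20) & 0xf;
	unsigned ident =  z_ident & 0x0ffff;

	printf("acq%04x port %X ident acq%04x_%03d\n", model, port, model, ident);
	/* prints: acq2106 port A ident acq2106_123 */
	return 0;
}
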
2179 
2180 static ssize_t store_com_trg(
2181  struct device * dev,
2182  struct device_attribute *attr,
2183  const char * buf, size_t count)
2184 {
2185  struct AFHBA_DEV *adev = afhba_lookupDeviceFromClass(dev);
2186  unsigned tv;
2187 
2188  if (sscanf(buf, "%x", &tv) == 1){
2190  afhba_write_reg(adev, HOST_TEST_REG, tv); /* forces 1usec high time */
2193  return strlen(buf);
2194  }else{
2195  return -1;
2196  }
2197 }
2198 
2199 static DEVICE_ATTR(com_trg, (S_IWUSR|S_IWGRP), 0, store_com_trg);
2200 
2201 
2202 static const struct attribute *dev_attrs[] = {
2203  &dev_attr_com_trg.attr, /* must be first */
2204  &dev_attr_z_mod_id.attr,
2205  &dev_attr_z_ident.attr,
2206  &dev_attr_acq_model.attr,
2207  &dev_attr_acq_port.attr,
2208  &dev_attr_acq_ident.attr,
2209  NULL
2210 };
2211 
2212 
2213 void afs_create_sysfs(struct AFHBA_DEV *adev)
2214 {
2215  const struct attribute ** attrs = dev_attrs;
2216  int rc;
2217 
2218  if (adev->remote_com_bar == -1 ){
2219  ++attrs;
2220  }
2221  rc = sysfs_create_files(&adev->class_dev->kobj, attrs);
2222  if (rc){
2223  dev_err(pdev(adev), "failed to create files");
2224  return;
2225  }
2226 }
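
/*
 * Illustrative userspace sketch (the sysfs directory path is an
 * assumption; the attribute name matches the DEVICE_ATTR definitions
 * above): fire a soft trigger by writing a hex value to com_trg.
 * Note that afs_create_sysfs() above skips com_trg entirely when
 * remote_com_bar == -1.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int com_soft_trigger(const char* sysfs_dir, unsigned tv)
{
	char path[256];
	char val[32];
	int fd, rc;

	snprintf(path, sizeof(path), "%s/com_trg", sysfs_dir);
	fd = open(path, O_WRONLY);
	if (fd < 0){
		return -1;
	}
	snprintf(val, sizeof(val), "%x\n", tv);
	rc = write(fd, val, strlen(val)) == (ssize_t)strlen(val)? 0: -1;
	close(fd);
	return rc;
}
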
2227 
2228 
2229 int afhba_stream_drv_init(struct AFHBA_DEV *adev)
2230 {
2231  adev->stream_dev = kzalloc(sizeof(struct AFHBA_STREAM_DEV), GFP_KERNEL);
2232 
2233  dev_info(pdev(adev), "afhba_stream_drv_init %s name:%s idx:%d", REVID, adev->name, adev->idx);
2234 
2235  afs_init_buffers(adev);
2236 
2237  if (cos_interrupt_ok && adev->peer == 0){
2238  hook_interrupts(adev);
2239  }
2240  startWork(adev);
2241  adev->stream_fops = &afs_fops;
2242  afs_init_procfs(adev);
2243  afs_create_sysfs(adev);
2244  return 0;
2245 }
2246 int afhba_stream_drv_del(struct AFHBA_DEV *adev)
2247 {
2248  dev_info(pdev(adev), "afhba_stream_drv_del()");
2249  afs_init_dma_clr(adev);
2250  stopWork(adev);
2251  return 0;
2252 }