34 #include <linux/module.h>
40 #include <linux/poll.h>
41 #include <linux/version.h>
/* Default host-buffer length: 1 MiB (0x100000 bytes) per DMA buffer. */
#define DEF_BUFFER_LEN 0x100000
55 "WORK timeout (jiffies) [50Hz] - decrease for hi fifo stat poll rate");
121 static struct file_operations afs_fops_dma;
122 static struct file_operations afs_fops_dma_poll;
134 static int getOrder(
int len)
139 for (order = 0; 1 << order < len; ++order){
145 static int getAFDMAC_Order(
int len)
150 for (order = 0; 1 << order < len; ++order){
160 for (ii = 0; ii < sdev->
nbuffers; ++ii){
/*
 * Copy len bytes from user space into kernel buffer `to`, returning
 * -EFAULT from the enclosing function on failure.
 *
 * Wrapped in do{}while(0) so the macro behaves as a single statement:
 * the previous bare-if form made `if (c) COPY_FROM_USER(...); else ...`
 * bind the else to the macro's internal if (dangling-else hazard).
 * NOTE: expands to a `return`, so only usable in int-returning functions.
 */
#define COPY_FROM_USER(to, from, len) \
	do { if (copy_from_user(to, from, len)) { return -EFAULT; } } while (0)
/*
 * Copy len bytes from kernel buffer `from` out to user space, returning
 * -EFAULT from the enclosing function on failure.
 *
 * Wrapped in do{}while(0) so the macro behaves as a single statement and
 * cannot capture a caller's `else` (dangling-else hazard of the previous
 * bare-if form).
 * NOTE: expands to a `return`, so only usable in int-returning functions.
 */
#define COPY_TO_USER(to, from, len) \
	do { if (copy_to_user(to, from, len)) { return -EFAULT; } } while (0)
199 static void write_descr(
struct AFHBA_DEV *adev,
unsigned offset,
int idesc)
205 dev_info(
pdev(adev),
"write_descr(%d) [%d] offset:%04x = %08x",
208 DEV_DBG(
pdev(adev),
"ibuf %d offset:%04x = %08x", idesc, offset, descr);
209 writel(descr, adev->
remote+offset);
214 static int _write_ram_descr(
struct AFHBA_DEV *adev,
unsigned offset,
int idesc,
int *cursor)
224 void* va = adev->
remote + offset + cr*
sizeof(unsigned);
226 DEV_DBG(
pdev(adev),
"_write_ram_descr() ibuf %d offset:%04x = %08x cursor:%d",
227 idesc, offset, descr, cr);
230 if (descr != descrr){
231 dev_err(
pdev(adev),
"descriptor [%4d] wrote 0x%08x read 0x%08x",
241 static int validate_dma_descriptor_ram(
242 struct AFHBA_DEV *adev,
unsigned offset,
unsigned max_id,
int phase)
249 for (
id = 0;
id < max_id; ++id){
250 descr = readl(adev->
remote+offset+
id*
sizeof(
unsigned));
252 dev_err(
pdev(adev),
"%s phase:%d descriptor mismatch at [%d] w:%08x r:%08x",
253 "validate_dma_descriptor_ram", phase,
id, sdev->
hbx[
id].
descr, descr);
256 dev_dbg(
pdev(adev),
"%s phase:%d descriptor at [%d] w:%08x r:%08x",
257 "validate_dma_descriptor_ram", phase,
id, sdev->
hbx[
id].
descr, descr);
260 dev_err(
pdev(adev),
"%s descriptor errors %d out of %d",
261 "validate_dma_descriptor_ram", errors,
id);
264 dev_info(
pdev(adev),
"%s %d descriptors PASS",
265 "validate_dma_descriptor_ram",
id);
269 static void write_ram_descr(
struct AFHBA_DEV *adev,
unsigned offset,
int idesc)
280 dev_warn(
pdev(adev),
"write_ram_descr valid PUSH only");
291 u32 value = readl(va);
317 u32 value = readl(va);
336 u32 value = readl(va);
340 static void afs_load_push_descriptor(
struct AFHBA_DEV *adev,
int idesc)
366 static void afs_init_dma_clr(
struct AFHBA_DEV *adev)
372 static void afs_configure_streaming_dma(
388 if (check != dma_ctrl){
389 dev_err(
pdev(adev),
"%s setting DMA_CTRL w:%08x r:%08x",
390 "afs_configure_streaming_dma", dma_ctrl, check);
394 static void afs_dma_set_recycle(
403 static void afs_load_llc_single_dma(
408 u32 len64 = ((len/64-1) + (len%64!=0));
412 dev_dbg(
pdev(adev),
"afs_load_llc_single_dma %s 0x%08x %d",
413 sDMA_SEL(dma_sel), pa, len);
426 "afs_load_llc_single_dma len64:%08x dma_desc:%08x dma_ctrl:%08x",
427 len64, dma_desc, dma_ctrl);
431 writel(dma_desc, adev->
remote+offset);
435 static void afs_load_dram_descriptors(
450 for (idesc = 0; idesc < nbufs; ++idesc, offset += 4){
451 struct XLLC_DEF* bd = &buffers[idesc];
452 u32 dma_desc = bd->
pa
455 dev_dbg(
pdev(adev),
"%s() [%d] 0x%08x",
456 "afs_load_dram_descriptors", idesc, dma_desc);
457 writel(dma_desc, adev->
remote+offset);
/* Low-latency descriptor geometry. */
#define LL_MAX_CNT 16			/* max blocks per LL descriptor chain */
/* Hardware encodes the block count as count-1 in the descriptor. */
#define LL_NB(cnt) ((cnt)-1)
/* Max payload of one chain; LL_BLOCK (block size in bytes) is defined
 * elsewhere in this file -- not visible in this chunk. */
#define LL_MAX_LEN (LL_MAX_CNT*LL_BLOCK)
472 static void afs_load_dram_descriptors_ll(
483 for ( ;
ib < nbufs; ++
ib){
493 if (residue != max(residue,
LL_BLOCK)){
494 dev_warn(
pdev(adev),
"%s() [%d] [%04x] expect data discontinuity want:%d getting:%d\n",
495 __FUNCTION__, idb, reg_off, residue,
LL_BLOCK);
503 | ((reg_off>>2)&0x0f);
505 dev_dbg(
pdev(adev),
"%s() [%d] [%04x] %p := 0x%08x",
506 __FUNCTION__, idb, reg_off, adev->
remote+reg_off, dma_desc);
508 writel(dma_desc, adev->
remote+reg_off);
522 static int _afs_dma_started(
struct AFHBA_DEV *adev,
int shl)
530 static inline int afs_dma_started(
struct AFHBA_DEV *adev,
enum DMA_SEL dma_sel)
537 static int afs_aurora_lane_up(
struct AFHBA_DEV *adev)
547 dev_info(
pdev(adev),
"afs_aurora_lane_up %c status 0x%08x",
548 aurora_id(adev), stat);
577 dev_err(
pdev(adev),
"DMA_CTRL_EN NOT SET wanted:%08x got:%08x",
583 static int afs_aurora_errors(
struct AFHBA_DEV *adev)
586 int link_warning = 0;
593 "aurora%c initial s:0x%08x m:0x%08x e:0x%08x",
594 adev->
sfp-SFP_A +
'A',
598 "aurora%c error: [%d] s:0x%08x m:0x%08x e:0x%08x",
599 adev->
sfp-SFP_A +
'A',
607 "aurora%c error: [%d] s:0x%08x m:0x%08x e:0x%08x NOT CLEARED",
608 adev->
sfp-SFP_A +
'A',
614 return link_warning? -1: 1;
621 static void _afs_pcie_mirror_init(
struct AFHBA_DEV *adev)
637 static int is_valid_z_ident(
unsigned z_ident,
char buf[],
int maxbuf)
639 if ((z_ident&0x21060000) == 0x21060000){
640 snprintf(buf, maxbuf,
"acq2106_%03d.comms%X",
641 z_ident&0x0ffff, (z_ident&0x00f00000)>>20);
643 }
else if ((z_ident&0xe4330000) == 0xe4330000){
644 snprintf(buf, maxbuf,
"kmcu_%03d.comms%x",
645 z_ident&0x0ffff, (z_ident&0x00f00000)>>20);
647 }
else if ((z_ident&0x43000000) == 0x43000000){
648 snprintf(buf, maxbuf,
"kmcu_%03d.comms%x",
656 static int _afs_check_read(
struct AFHBA_DEV *adev)
662 if (z_ident2 == 0xffffffff || (z_ident2&~0x0ffff) == 0xdead0000){
663 dev_err(
pdev(adev),
"ERROR reading Z_IDENT %08x, please reboot now", z_ident2);
667 int valid_id = is_valid_z_ident(z_ident2, buf, 80);
668 #define ZI_FMT "[%d] Z_IDENT 1:0x%08x 2:0x%08x %s"
673 adev->
idx, z_ident1, z_ident2,
681 adev->
idx, z_ident1, z_ident2,
690 static int _afs_comms_init(
struct AFHBA_DEV *adev)
693 enum { WAIT_INIT, WAIT_LANE_UP, WAIT_LANE_STILL_UP, CHECK_READ, TXEN } state = WAIT_INIT;
694 int ticks_in_state = 0;
/* Advance the comms-init state machine: set the new state, reset the
 * per-state tick counter, and restart the enclosing for(;;) poll loop.
 * NOTE(review): deliberately NOT wrapped in do{}while(0) -- the trailing
 * `continue` must bind to the caller's loop; a do/while wrapper would
 * capture it. Must be invoked as a complete statement inside the loop. */
#define CHANGE_STATE(s) state = (s); ticks_in_state = 0; continue
697 for ( ; ; msleep(
MSLEEP_TO), ++ticks_in_state){
698 dev_info(
pdev(adev),
"%s state:%d %s ticks:%d", __FUNCTION__, state,
699 state==WAIT_INIT?
"WAIT_INIT": state==WAIT_LANE_UP?
"WAIT_LANE_UP":
700 state==WAIT_LANE_STILL_UP?
"WAIT_LANE_STILL_UP": state==CHECK_READ?
"CHECK_READ":
701 state==TXEN?
"TXEN":
"???",
710 if (afs_aurora_lane_up(adev)){
713 if (ticks_in_state > 100){
718 case WAIT_LANE_STILL_UP:
719 if (afs_aurora_lane_up(adev)){
720 if (ticks_in_state > 3){
721 dev_info(
pdev(adev),
"%s call mirror_init 01", __FUNCTION__);
722 afs_init_dma_clr(adev);
723 _afs_pcie_mirror_init(adev);
732 if (afs_aurora_errors(adev) == -1 ){
733 dev_warn(
pdev(adev),
"%s bad aurora, TXDIS", __FUNCTION__);
743 dev_err(
pdev(adev),
"%s illegal state %d", __FUNCTION__, state);
753 if (afs_aurora_lane_up(adev)){
755 dev_info(
pdev(adev),
"aurora%c link up!", aurora_id(adev));
765 dev_info(
pdev(adev),
"aurora%c link down!", aurora_id(adev));
768 dev_dbg(
pdev(adev),
"aurora lane down");
/* RTDMAC FIFO geometry: entry counts (in 32-bit words) ... */
#define RTDMAC_DATA_FIFO_CNT 0x1000
#define RTDMAC_DESC_FIFO_CNT 0x1000
/* ... and the corresponding sizes in bytes. */
#define DATA_FIFO_SZ (RTDMAC_DATA_FIFO_CNT*sizeof(unsigned))
#define DESC_FIFO_SZ (RTDMAC_DESC_FIFO_CNT*sizeof(unsigned))
786 static void mark_empty(
struct device *dev,
struct HostBuffer *hb){
787 u32 mark_len = 2 *
sizeof(
u32);
789 u32 *pmark = (
u32*)(hb->
va + offset);
794 dma_sync_single_for_device(dev, hb->
pa+offset, mark_len, PCI_DMA_FROMDEVICE);
798 static int is_marked_empty(
struct device *dev,
struct HostBuffer *hb){
799 u32 mark_len = 2 *
sizeof(
u32);
801 u32 *pmark = (
u32*)(hb->
va + offset);
804 dma_sync_single_for_cpu(dev, hb->
pa+offset, mark_len, PCI_DMA_FROMDEVICE);
811 static int queue_next_free_buffer(
struct AFHBA_DEV *adev)
816 if (mutex_lock_interruptible(&sdev->
list_mutex)){
819 if (!list_empty_careful(&sdev->bp_empties.list)){
822 mark_empty(&adev->
pci_dev->dev, hb);
824 afs_load_push_descriptor(adev, hb->
ibuf);
826 list_move_tail(&hb->
list, &sdev->bp_filling.list);
833 static void queue_free_buffers(
struct AFHBA_DEV *adev)
836 struct JOB *job = &sdev->
job;
838 (job->buffers_received+job->buffers_discarded);
840 while (job->buffers_queued < job->buffers_demand){
841 if (queue_next_free_buffer(adev)){
842 ++job->buffers_queued;
859 if (sdev->
hbx[ii].
descr == inflight_descr){
860 return &sdev->
hbx[ii];
866 static void report_inflight(
867 struct AFHBA_DEV *adev,
int ibuf,
int is_error,
char *msg)
873 dev_warn(
pdev(adev),
"%s: buffer %02d %s",
874 msg, ibuf, is_error?
"ERROR":
"WARNING");
880 dev_warn(
pdev(adev),
"%s: buffer %02d last descr:%08x [%02d] fifo:%08x %s",
884 inflight? inflight->
ibuf: -1,
886 is_error?
"ERROR":
"WARNING");
890 static void report_stuck_buffer(
struct AFHBA_DEV *adev,
int ibuf)
892 report_inflight(adev,
ibuf, 0,
"buffer was skipped");
899 dev_dbg(
pdev(adev),
"ibuf %d", hb->
ibuf);
901 list_move_tail(&hb->
list, &sdev->bp_empties.list);
907 struct JOB *job = &sdev->
job;
909 hb->
esta = read_astatus2(adev);
911 report_inflight(adev, hb->
ibuf, 0,
"->FULL");
915 list_move_tail(&hb->
list, &sdev->bp_full.list);
916 job->buffers_received++;
919 static int queue_full_buffers(
struct AFHBA_DEV *adev)
925 struct JOB *job = &sdev->
job;
929 if (mutex_lock_interruptible(&sdev->
list_mutex)){
933 list_for_each_entry_safe(hb, tmp, &sdev->bp_filling.list, list){
934 if (++ifilling == 1){
937 if (is_marked_empty(&adev->
pci_dev->dev, hb)){
944 if (ifilling > 1 && first && hb != first){
945 if (is_marked_empty(&adev->
pci_dev->dev, first)){
948 report_inflight(adev, first->
ibuf, 0,
"queuing dirty");
949 nrx = _queue_full_buffer(adev, first, nrx);
951 report_stuck_buffer(adev, first->
ibuf);
952 return_empty(adev, first);
954 dev_warn(
pdev(adev),
"stop_on_skipped_buffer triggered");
960 report_inflight(adev, first->
ibuf, 0,
"jackpot");
961 nrx = _queue_full_buffer(adev, first, nrx);
966 nrx = _queue_full_buffer(adev, hb, nrx);
972 dev_warn(
pdev(adev),
"ifilling > NBUFFERS?");
975 job->catchup_histo[nrx]++;
1008 dev_dbg(
pdev(adev),
"afs_init_buffers() 01 order=%d", order);
1011 INIT_LIST_HEAD(&sdev->bp_empties.list);
1012 INIT_LIST_HEAD(&sdev->bp_filling.list);
1013 INIT_LIST_HEAD(&sdev->bp_full.list);
1020 dev_dbg(
pdev(adev),
"allocating %d buffers size:%d order:%d dev.dma_mask:%08llx",
1023 for (hb = sdev->
hbx, ii = 0; ii <
nbuffers; ++ii, ++hb){
1024 void *buf = (
void*)__get_free_pages(GFP_KERNEL|GFP_DMA32, order);
1027 dev_err(
pdev(adev),
"failed to allocate buffer %d", ii);
1031 dev_dbg(
pdev(adev),
"buffer %2d allocated at %p, map it", ii, buf);
1034 hb->
pa = dma_map_single(&adev->
pci_dev->dev, buf,
1039 dev_dbg(
pdev(adev),
"buffer %2d allocated, map done", ii);
1042 dev_err(
pdev(adev),
"HB NOT PAGE ALIGNED");
1050 dev_dbg(
pdev(adev),
"[%d] %p %08x %d %08x",
1052 list_add_tail(&hb->
list, &sdev->bp_empties.list);
1066 init_histo_buffers(sdev);
1067 dev_dbg(
pdev(adev),
"afs_init_buffers() 99");
1071 static irqreturn_t afs_cos_isr(
int irq,
void *data)
1077 dev_info(
pdev(adev),
"afs_cos_isr %08x", cr);
1084 static int hook_interrupts(
struct AFHBA_DEV* adev)
1086 int rc = pci_enable_msi(adev->
pci_dev);
1088 dev_err(
pdev(adev),
"pci_enable_msi_exact(%d) FAILED", 1);
1091 rc = request_irq(adev->
pci_dev->irq, afs_cos_isr, IRQF_SHARED,
"afhba", adev);
1093 pr_warn(
"afhba.%d: request_irq =%d failed!\n",
1095 pci_disable_msi(adev->
pci_dev);
1102 static void smooth(
unsigned *rate,
unsigned *old,
unsigned *
new)
1120 static int as_mon(
void *arg)
1123 wait_queue_head_t waitq;
1125 init_waitqueue_head(&waitq);
1127 while(!kthread_should_stop()){
1129 wait_event_interruptible_timeout(waitq, 0, HZ);
1131 smooth(&job->rx_rate,
1132 &job->rx_buffers_previous, &job->buffers_received);
1134 smooth(&job->int_rate, &job->int_previous, &job->ints);
1141 static void check_fifo_status(
struct AFHBA_DEV* adev)
1144 u32 desc_sta = DMA_PUSH_DESC_STA_RD(adev);
1145 u32 desc_flags = check_fifo_xxxx(tdev->desc_fifo_histo, desc_sta);
1146 u32 data_sta = rtd_read_reg(tdev, RTMT_C_DATA_FIFSTA);
1147 u32 data_flags = check_fifo_xxxx(tdev->data_fifo_histo, data_sta);
1149 if ((data_flags & RTMT_H_XX_DMA_FIFSTA_FULL) &&
1150 tdev->job.errors < 10){
1152 err(
"GAME OVER: %d FIFSTA_DATA_OVERFLOW: 0x%08x",
1153 tdev->idx, data_sta);
1154 if (++tdev->job.errors == 10){
1155 err(
"too many errors, turning reporting off ..");
1158 if ((desc_flags & RTMT_H_XX_DMA_FIFSTA_FULL) != 0 &&
1159 tdev->job.errors < 10){
1160 err(
"GAME OVER: %d FIFSTA_DESC_OVERFLOW: 0x%08x",
1161 tdev->idx, desc_sta);
1162 if (++tdev->job.errors == 10){
1163 err(
"too many errors, turning reporting off ..");
1171 return !job->please_stop && job->buffers_queued < job->buffers_demand;
1180 queue_free_buffers(adev);
1185 queue_free_buffers(adev);
1206 dev_err(
pdev(adev),
"ERROR dma load retry count exceeded");
1217 static int afs_isr_work(
void *arg)
1221 struct JOB* job = &sdev->
job;
1225 struct sched_param param = { .sched_priority = 10 };
1226 int please_check_fifo = 0;
1227 int job_is_go_but_aurora_is_down = 0;
1228 unsigned loop_jiffies = 0;
1229 unsigned last_amon_jiffies = 0;
1231 sched_setscheduler(current, SCHED_FIFO, ¶m);
1234 for ( ; !kthread_should_stop(); ++loop_count, loop_jiffies +=
WORK_TO){
1235 int timeout = wait_event_interruptible_timeout(
1240 if (!timeout || loop_count%10 == 0){
1241 dev_dbg(
pdev(adev),
"TIMEOUT? %d queue_free_buffers() ? %d",
1247 job_is_go(job) && !job_is_go_but_aurora_is_down){
1248 dev_warn(
pdev(adev),
"job is go but aurora is down");
1249 job_is_go_but_aurora_is_down = 1;
1251 job_is_go_but_aurora_is_down = 0;
1253 last_amon_jiffies = loop_jiffies;
1257 if (!job->dma_started){
1262 queue_free_buffers(adev);
1266 if (job->buffers_demand > 0 && queue_full_buffers(adev) > 0){
1279 switch(job->please_stop){
1285 job->dma_started = 0;
1287 validate_dma_descriptor_ram(
1299 if (please_check_fifo){
1300 check_fifo_status(adev);
1301 please_check_fifo = 0;
1309 static void startWork(
struct AFHBA_DEV *adev)
1316 static void stopWork(
struct AFHBA_DEV *adev)
1328 struct file *file,
char *buf,
size_t count, loff_t *f_pos)
1330 unsigned *the_histo =
PD(file)->private;
1331 int maxentries =
PD(file)->private2;
1332 unsigned cursor = *f_pos;
1335 if (cursor >= maxentries){
1338 int headroom = (maxentries - cursor) *
sizeof(
unsigned);
1339 if (count > headroom){
1343 rc = copy_to_user(buf, the_histo+cursor, count);
1348 *f_pos += count/
sizeof(unsigned);
1352 static struct file_operations afs_fops_histo = {
1358 static int rtm_t_start_stream(
struct AFHBA_DEV *adev,
unsigned buffers_demand)
1361 struct JOB *job = &sdev->
job;
1363 dev_dbg(
pdev(adev),
"01");
1365 memset(job, 0,
sizeof(
struct JOB));
1367 job->buffers_demand = buffers_demand;
1368 if (unlikely(list_empty_careful(&sdev->bp_empties.list))){
1369 dev_err(
pdev(adev),
"no free buffers");
1370 return -ERESTARTSYS;
1374 job->please_stop =
PS_OFF;
1378 dev_dbg(
pdev(adev),
"99");
1382 int afs_histo_open(
struct inode *inode,
struct file *file,
unsigned *histo,
int hcount)
1384 file->f_op = &afs_fops_histo;
1385 PD(file)->private = histo;
1386 PD(file)->private2 = hcount;
1397 if (mutex_lock_interruptible(&sdev->
list_mutex)){
1400 INIT_LIST_HEAD(&sdev->bp_empties.list);
1401 INIT_LIST_HEAD(&sdev->bp_filling.list);
1402 INIT_LIST_HEAD(&sdev->bp_full.list);
1406 for (ii = 0; ii <
nbuffers; ++ii, ++hb){
1408 list_add_tail(&hb->
list, &sdev->bp_empties.list);
1434 dev_info(
pdev(adev),
"afs_stop_llc_pull()");
1440 dev_info(
pdev(adev),
"afs_stop_stream_push()");
1446 dev_info(
pdev(adev),
"afs_stop_stream_pull()");
1461 dev_err(
pdev(adev),
"DMA_CTRL_EN NOT SET attempt restart");
1471 struct JOB* job = &sdev->
job;
1474 job->please_stop =
PS_OFF;
1479 xllc_def->
pa = sdev->
hbx[0].
pa;
1484 job->please_stop =
PS_OFF;
1486 job->push_llc_def = *xllc_def;
1498 xllc_def->
pa = sdev->
hbx[0].
pa;
1512 dev_dbg(
pdev(adev),
"45: DMA open");
1515 if (sdev->
pid == 0){
1516 sdev->
pid = current->pid;
1519 if (sdev->
pid != current->pid){
1528 for (ii = 0; ii !=
nbuffers; ++ii){
1532 return -ERESTARTSYS;
1535 if ((file->f_flags & O_NONBLOCK) != 0){
1536 file->f_op = &afs_fops_dma_poll;
1538 file->f_op = &afs_fops_dma;
1541 dev_dbg(
pdev(adev),
"99");
1553 dev_dbg(
pdev(adev),
"afs_dma_release() 01 %s %d %p<-%p->%p",
1554 adev->
name,
PD(file)->minor,
1555 PD(file)->my_buffers.prev,
1556 &
PD(file)->my_buffers,
1557 PD(file)->my_buffers.next);
1559 if (mutex_lock_interruptible(&sdev->
list_mutex)){
1560 return -ERESTARTSYS;
1562 list_for_each_entry_safe(hb, tmp, &
PD(file)->my_buffers,
list){
1563 dev_dbg(
pdev(adev),
"returning %d", hb->
ibuf);
1564 return_empty(adev, hb);
1569 dev_dbg(
pdev(adev),
"90");
1593 struct file *file,
char __user *buf,
size_t count, loff_t *f_pos)
1601 struct JOB *job = &sdev->
job;
1604 dev_dbg(
pdev(adev),
"01 ps %u count %ld demand %d received %d waiting %d",
1605 (
unsigned)*f_pos, (
long)count,
1606 job->buffers_demand, job->buffers_received,
1607 !list_empty(&sdev->bp_full.list));
1609 if (job->buffers_received >= job->buffers_demand &&
1610 list_empty(&sdev->bp_full.list) ){
1611 dev_dbg(
pdev(adev),
"job done");
1620 rc = wait_event_interruptible(
1623 rc = wait_event_interruptible_timeout(
1625 !list_empty(&sdev->bp_full.list),
RX_TO);
1628 dev_dbg(
pdev(adev),
"done waiting, rc %d", rc);
1631 dev_dbg(
pdev(adev),
"RESTART");
1632 return -ERESTARTSYS;
1633 }
else if (mutex_lock_interruptible(&sdev->
list_mutex)){
1634 return -ERESTARTSYS;
1642 list_for_each_entry_safe(hb, tmp, &sdev->bp_full.list, list){
1643 if (nbytes+
SBDSZ > count){
1644 dev_dbg(
pdev(adev),
"quit nbytes %d count %lu",
1645 nbytes, (
long)count);
1652 if (copy_to_user(buf+nbytes, &sbd,
SBDSZ)){
1656 dev_dbg(
pdev(adev),
"add my_buffers %d", hb->
ibuf);
1658 list_move_tail(&hb->
list, &
PD(file)->my_buffers);
1659 hb->
bstate = BS_FULL_APP;
1664 if (rc == 0 && nbytes == 0){
1665 dev_dbg(
pdev(adev),
"TIMEOUT");
1669 dev_dbg(
pdev(adev),
"return %d", nbytes);
1678 static unsigned int afs_dma_poll(
struct file* file, poll_table *poll_table)
1682 unsigned int mask = POLLOUT | POLLWRNORM;
1683 if (!list_empty(&sdev->bp_full.list)){
1684 mask |= POLLIN | POLLRDNORM;
1687 if (!list_empty(&sdev->bp_full.list)){
1688 mask |= POLLIN | POLLRDNORM;
1695 struct file *file,
char __user *buf,
size_t count, loff_t *f_pos)
1703 struct JOB *job = &sdev->
job;
1710 dev_dbg(
pdev(adev),
"01 ps %u count %ld demand %d received %d waiting %d",
1711 (
unsigned)*f_pos, (
long)count,
1712 job->buffers_demand, job->buffers_received,
1713 !list_empty(&sdev->bp_full.list) );
1715 if (job->buffers_received >= job->buffers_demand &&
1716 list_empty(&sdev->bp_full.list) ){
1717 dev_dbg(
pdev(adev),
"job done");
1724 if (queue_full_buffers(adev)){
1725 list_for_each_entry_safe(hb, tmp, &sdev->bp_full.list,
list){
1726 if (nbytes+
sizeof(
int) > count){
1727 dev_dbg(
pdev(adev),
"quit nbytes %d count %lu",
1728 nbytes, (
long)count);
1732 if (copy_to_user(buf+nbytes, &hb->
ibuf,
sizeof(
int))){
1736 dev_dbg(
pdev(adev),
"add my_buffers %d", hb->
ibuf);
1738 list_move_tail(&hb->
list, &
PD(file)->my_buffers);
1740 nbytes +=
sizeof(int);
1743 if (rc == 0 && nbytes == 0){
1744 dev_dbg(
pdev(adev),
"TIMEOUT");
1748 dev_dbg(
pdev(adev),
"return %d", nbytes);
1760 struct file *file,
const char *buf,
size_t count, loff_t *f_pos)
1772 dev_dbg(
pdev(adev),
"pos %u count %lu", (
unsigned)*f_pos, (
long)count);
1774 if (mutex_lock_interruptible(&sdev->
list_mutex)){
1775 return -ERESTARTSYS;
1777 while (nbytes+
sizeof(
int) <= count){
1780 if (copy_from_user(&
id, buf+nbytes,
sizeof(
int))){
1783 dev_dbg(
pdev(adev),
"[%u] recycle buffer %d",
1784 (
unsigned)(nbytes/
sizeof(
int)),
id);
1787 dev_err(
pdev(adev),
"ID < 0");
1791 dev_err(
pdev(adev),
"ID > NBUFFERS");
1794 }
else if (sdev->
hbx[
id].
bstate != BS_FULL_APP){
1795 dev_err(
pdev(adev),
"STATE != BS_FULL_APP %d",
1804 list_for_each_entry(
1805 hb, &
PD(file)->my_buffers,
list){
1807 dev_dbg(
pdev(adev),
"listing %d", hb->
ibuf);
1810 if (hb->
ibuf ==
id){
1811 return_empty(adev, hb);
1812 nbytes +=
sizeof(int);
1818 dev_err(
pdev(adev),
"ATTEMPT TO RET BUFFER NOT MINE");
1829 dev_dbg(
pdev(adev),
"99 return %d", rc);
1839 for (ii = 0; ii < 16; ++ii){
1840 if (1<<ii > nblocks){
1841 int len1 = (1<<(ii-1))*1024;
1843 xdef[1].
pa = xdef[0].
pa + len1;
1844 xdef[1].
len = xdef[0].
len - len1;
1847 xdef[3].
pa = xdef[2].
pa + len1;
1848 xdef[3].
len = xdef[2].
len - len1;
1851 }
else if (1<<ii == nblocks){
1857 printk(
"ERROR: fix_dma_descriptors BUFFER TOO LONG");
1863 struct JOB* job = &sdev->
job;
1868 job->please_stop =
PS_OFF;
1886 afs_load_dram_descriptors(adev,
DMA_PUSH_SEL, xdef, nb);
1889 job->please_stop =
PS_OFF;
1890 job->on_push_dma_timeout = 0;
1899 struct JOB* job = &sdev->
job;
1903 job->please_stop =
PS_OFF;
1912 dev_dbg(
pdev(adev),
"%s descriptors:%d %s", __FUNCTION__,
abn->
ndesc,
1924 dev_dbg(
pdev(adev),
"%s [%2d] pa:%08x len:%d",
1931 job->please_stop =
PS_OFF;
1934 job->on_push_dma_timeout = 0;
1937 job->on_pull_dma_timeout = 0;
1945 unsigned int cmd,
unsigned long arg)
1949 void* varg = (
void*)arg;
1956 u32 my_transfer_buffers;
1958 return rtm_t_start_stream(adev, my_transfer_buffers);
1987 struct ABN *
abn = kmalloc(
sizeof(
struct ABN), GFP_KERNEL);
2001 dev_err(
pdev(adev),
"AFHBA_AO_BURST_INIT other user data active");
2032 int minor =
PD(vma->vm_file)->minor;
2036 unsigned long vsize = vma->vm_end - vma->vm_start;
2037 unsigned long psize = hb->
len;
2038 unsigned pfn = hb->
pa >> PAGE_SHIFT;
2040 dev_dbg(
pdev(adev),
"%c vsize %lu psize %lu %s",
2041 'D', vsize, psize, vsize>psize?
"EINVAL":
"OK");
2046 if (remap_pfn_range(
2047 vma, vma->vm_start, pfn, vsize, vma->vm_page_prot)){
2054 static struct file_operations afs_fops_dma = {
2058 .poll = afs_dma_poll,
2064 static struct file_operations afs_fops_dma_poll = {
2068 .poll = afs_dma_poll,
2079 dev_dbg(
pdev(adev),
"01");
2083 dev_dbg(
pdev(adev),
"33: minor %d",
PD(file)->minor);
2085 switch((
PD(file)->minor)){
2104 dev_err(
pdev(adev),
"99 adev %p name %s", adev, adev->
name);
2111 static struct file_operations afs_fops = {
2117 static ssize_t show_zmod_id(
2118 struct device * dev,
2119 struct device_attribute *attr,
2126 static DEVICE_ATTR(z_mod_id, S_IRUGO, show_zmod_id, 0);
2128 static ssize_t show_z_ident(
2129 struct device * dev,
2130 struct device_attribute *attr,
2137 static DEVICE_ATTR(z_ident, S_IRUGO, show_z_ident, 0);
2139 static ssize_t show_acq_model(
2140 struct device * dev,
2141 struct device_attribute *attr,
2146 model = model&0x2106;
2147 return sprintf(buf,
"acq%04x\n", model);
2150 static DEVICE_ATTR(acq_model, S_IRUGO, show_acq_model, 0);
2152 static ssize_t show_acq_port(
2153 struct device * dev,
2154 struct device_attribute *attr,
2160 return sprintf(buf,
"%X\n", comms);
2163 static DEVICE_ATTR(acq_port, S_IRUGO, show_acq_port, 0);
2165 static ssize_t show_acq_ident(
2166 struct device * dev,
2167 struct device_attribute *attr,
2172 unsigned ident = z_ident&0x0ffff;
2173 unsigned model = (z_ident >> 16) & 0x2106;
2175 return sprintf(buf,
"acq%04x_%03d\n", model, ident);
2178 static DEVICE_ATTR(acq_ident, S_IRUGO, show_acq_ident, 0);
2180 static ssize_t store_com_trg(
2181 struct device * dev,
2182 struct device_attribute *attr,
2183 const char * buf,
size_t count)
2188 if (sscanf(buf,
"%x", &tv) == 1){
2199 static DEVICE_ATTR(com_trg, (S_IWUSR|S_IWGRP), 0, store_com_trg);
2202 static const struct attribute *dev_attrs[] = {
2203 &dev_attr_com_trg.attr,
2204 &dev_attr_z_mod_id.attr,
2205 &dev_attr_z_ident.attr,
2206 &dev_attr_acq_model.attr,
2207 &dev_attr_acq_port.attr,
2208 &dev_attr_acq_ident.attr,
2215 const struct attribute ** attrs = dev_attrs;
2221 rc = sysfs_create_files(&adev->
class_dev->kobj, attrs);
2223 dev_err(
pdev(adev),
"failed to create files");
2233 dev_info(
pdev(adev),
"afhba_stream_drv_init %s name:%s idx:%d",
REVID, adev->
name, adev->
idx);
2238 hook_interrupts(adev);
2248 dev_info(
pdev(adev),
"afhba_stream_drv_del()");
2249 afs_init_dma_clr(adev);