[PATCH] Re: 2.5: further llseek cleanup (3/3)
[opensuse:kernel.git] drivers/ieee1394/pcilynx.c
1 /*
2  * ti_pcilynx.c - Texas Instruments PCILynx driver
3  * Copyright (C) 1999,2000 Andreas Bombe <andreas.bombe@munich.netsurf.de>,
4  *                         Stephan Linz <linz@mazet.de>
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or
9  * (at your option) any later version.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, write to the Free Software Foundation,
18  * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19  */
20
21 #include <linux/config.h>
22 #include <linux/kernel.h>
23 #include <linux/slab.h>
24 #include <linux/interrupt.h>
25 #include <linux/wait.h>
26 #include <linux/errno.h>
27 #include <linux/module.h>
28 #include <linux/init.h>
29 #include <linux/pci.h>
30 #include <linux/fs.h>
31 #include <linux/poll.h>
32 #include <linux/smp_lock.h>
33 #include <asm/byteorder.h>
34 #include <asm/atomic.h>
35 #include <asm/io.h>
36 #include <asm/uaccess.h>
37
38 #include "ieee1394.h"
39 #include "ieee1394_types.h"
40 #include "hosts.h"
41 #include "ieee1394_core.h"
42 #include "highlevel.h"
43 #include "pcilynx.h"
44
45
46 /* print general (card independent) information */
47 #define PRINT_G(level, fmt, args...) printk(level "pcilynx: " fmt "\n" , ## args)
48 /* print card specific information */
49 #define PRINT(level, card, fmt, args...) printk(level "pcilynx%d: " fmt "\n" , card , ## args)
50
51 #ifdef CONFIG_IEEE1394_VERBOSEDEBUG
52 #define PRINT_GD(level, fmt, args...) printk(level "pcilynx: " fmt "\n" , ## args)
53 #define PRINTD(level, card, fmt, args...) printk(level "pcilynx%d: " fmt "\n" , card , ## args)
54 #else
55 #define PRINT_GD(level, fmt, args...) do {} while (0)
56 #define PRINTD(level, card, fmt, args...) do {} while (0)
57 #endif
58
59
60 static struct hpsb_host_driver *lynx_driver;
61 static unsigned int card_id;
62
63 /*
64  * PCL handling functions.
65  */
66
67 static pcl_t alloc_pcl(struct ti_lynx *lynx)
68 {
69         u8 m;
70         int i, j;
71
72         spin_lock(&lynx->lock);
73         /* FIXME - use ffz() to make this readable (see the sketch after this function) */
74         for (i = 0; i < (LOCALRAM_SIZE / 1024); i++) {
75                 m = lynx->pcl_bmap[i];
76                 for (j = 0; j < 8; j++) {
77                         if (m & 1<<j) {
78                                 continue;
79                         }
80                         m |= 1<<j;
81                         lynx->pcl_bmap[i] = m;
82                         spin_unlock(&lynx->lock);
83                         return 8 * i + j;
84                 }
85         }
86         spin_unlock(&lynx->lock);
87
88         return -1;
89 }
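/*
 * A minimal sketch of what the FIXME above might ask for, using ffz()
 * from <asm/bitops.h> (untested illustration, not part of the driver;
 * it relies on the u8 bitmap byte being promoted to unsigned long, so
 * ffz() returns 8 when all eight low bits are set):
 *
 *	for (i = 0; i < (LOCALRAM_SIZE / 1024); i++) {
 *		j = ffz(lynx->pcl_bmap[i]);
 *		if (j < 8) {
 *			lynx->pcl_bmap[i] |= 1 << j;
 *			spin_unlock(&lynx->lock);
 *			return 8 * i + j;
 *		}
 *	}
 */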
90
91
92 #if 0
93 static void free_pcl(struct ti_lynx *lynx, pcl_t pclid)
94 {
95         int off, bit;
96
97         off = pclid / 8;
98         bit = pclid % 8;
99
100         if (pclid < 0) {
101                 return;
102         }
103
104         spin_lock(&lynx->lock);
105         if (lynx->pcl_bmap[off] & 1<<bit) {
106                 lynx->pcl_bmap[off] &= ~(1<<bit);
107         } else {
108                 PRINT(KERN_ERR, lynx->id, 
109                       "attempted to free unallocated PCL %d", pclid);
110         }
111         spin_unlock(&lynx->lock);
112 }
113
114 /* functions useful for debugging */        
115 static void pretty_print_pcl(const struct ti_pcl *pcl)
116 {
117         int i;
118
119         printk("PCL next %08x, userdata %08x, status %08x, remtrans %08x, nextbuf %08x\n",
120                pcl->next, pcl->user_data, pcl->pcl_status, 
121                pcl->remaining_transfer_count, pcl->next_data_buffer);
122
123         printk("PCL");
124         for (i=0; i<13; i++) {
125                 printk(" c%x:%08x d%x:%08x",
126                        i, pcl->buffer[i].control, i, pcl->buffer[i].pointer);
127                 if (!(i & 0x3) && (i != 12)) printk("\nPCL");
128         }
129         printk("\n");
130 }
131         
132 static void print_pcl(const struct ti_lynx *lynx, pcl_t pclid)
133 {
134         struct ti_pcl pcl;
135
136         get_pcl(lynx, pclid, &pcl);
137         pretty_print_pcl(&pcl);
138 }
139 #endif
140
141
142
143 /***********************************
144  * IEEE-1394 functionality section *
145  ***********************************/
146
147
148 static int get_phy_reg(struct ti_lynx *lynx, int addr)
149 {
150         int retval;
151         int i = 0;
152
153         unsigned long flags;
154
155         if (addr > 15) {
156                 PRINT(KERN_ERR, lynx->id,
157                       "%s: PHY register address %d out of range",
158                       __FUNCTION__, addr);
159                 return -1;
160         }
161
162         spin_lock_irqsave(&lynx->phy_reg_lock, flags);
163
164         reg_write(lynx, LINK_PHY, LINK_PHY_READ | LINK_PHY_ADDR(addr));
165         do {
166                 retval = reg_read(lynx, LINK_PHY);
167
168                 if (i > 10000) {
169                         PRINT(KERN_ERR, lynx->id, "%s: runaway loop, aborting",
170                               __FUNCTION__);
171                         retval = -1;
172                         break;
173                 }
174                 i++;
175         } while ((retval & 0xf00) != LINK_PHY_RADDR(addr));
176
177         reg_write(lynx, LINK_INT_STATUS, LINK_INT_PHY_REG_RCVD);
178         spin_unlock_irqrestore(&lynx->phy_reg_lock, flags);
179
180         if (retval != -1) {
181                 return retval & 0xff;
182         } else {
183                 return -1;
184         }
185 }
186
187 static int set_phy_reg(struct ti_lynx *lynx, int addr, int val)
188 {
189         unsigned long flags;
190
191         if (addr > 15) {
192                 PRINT(KERN_ERR, lynx->id,
193                       "%s: PHY register address %d out of range", __FUNCTION__, addr);
194                 return -1;
195         }
196
197         if (val > 0xff) {
198                 PRINT(KERN_ERR, lynx->id,
199                       "%s: PHY register value %d out of range", __FUNCTION__, val);
200                 return -1;
201         }
202
203         spin_lock_irqsave(&lynx->phy_reg_lock, flags);
204
205         reg_write(lynx, LINK_PHY, LINK_PHY_WRITE | LINK_PHY_ADDR(addr)
206                   | LINK_PHY_WDATA(val));
207
208         spin_unlock_irqrestore(&lynx->phy_reg_lock, flags);
209
210         return 0;
211 }
212
213 static int sel_phy_reg_page(struct ti_lynx *lynx, int page)
214 {
215         int reg;
216
217         if (page > 7) {
218                 PRINT(KERN_ERR, lynx->id,
219                       "%s: PHY page %d out of range", __FUNCTION__, page);
220                 return -1;
221         }
222
223         reg = get_phy_reg(lynx, 7);
224         if (reg != -1) {
225                 reg &= 0x1f;
226                 reg |= (page << 5);
227                 set_phy_reg(lynx, 7, reg);
228                 return 0;
229         } else {
230                 return -1;
231         }
232 }
233
234 #if 0 /* not needed at this time */
235 static int sel_phy_reg_port(struct ti_lynx *lynx, int port)
236 {
237         int reg;
238
239         if (port > 15) {
240                 PRINT(KERN_ERR, lynx->id,
241                       "%s: PHY port %d out of range", __FUNCTION__, port);
242                 return -1;
243         }
244
245         reg = get_phy_reg(lynx, 7);
246         if (reg != -1) {
247                 reg &= 0xf0;
248                 reg |= port;
249                 set_phy_reg(lynx, 7, reg);
250                 return 0;
251         } else {
252                 return -1;
253         }
254 }
255 #endif
256
257 static u32 get_phy_vendorid(struct ti_lynx *lynx)
258 {
259         u32 pvid = 0;
260         sel_phy_reg_page(lynx, 1);
261         pvid |= (get_phy_reg(lynx, 10) << 16);
262         pvid |= (get_phy_reg(lynx, 11) << 8);
263         pvid |= get_phy_reg(lynx, 12);
264         PRINT(KERN_INFO, lynx->id, "PHY vendor id 0x%06x", pvid);
265         return pvid;
266 }
267
268 static u32 get_phy_productid(struct ti_lynx *lynx)
269 {
270         u32 id = 0;
271         sel_phy_reg_page(lynx, 1);
272         id |= (get_phy_reg(lynx, 13) << 16);
273         id |= (get_phy_reg(lynx, 14) << 8);
274         id |= get_phy_reg(lynx, 15);
275         PRINT(KERN_INFO, lynx->id, "PHY product id 0x%06x", id);
276         return id;
277 }
278
279 static quadlet_t generate_own_selfid(struct ti_lynx *lynx,
280                                      struct hpsb_host *host)
281 {
282         quadlet_t lsid;
283         char phyreg[7];
284         int i;
285
286         phyreg[0] = lynx->phy_reg0;
287         for (i = 1; i < 7; i++) {
288                 phyreg[i] = get_phy_reg(lynx, i);
289         }
290
291         /* FIXME? We assume a TSB21LV03A phy here.  This code doesn't support
292            more than 3 ports on the PHY anyway. */
293
294         lsid = 0x80400000 | ((phyreg[0] & 0xfc) << 22);
295         lsid |= (phyreg[1] & 0x3f) << 16; /* gap count */
296         lsid |= (phyreg[2] & 0xc0) << 8; /* max speed */
297         lsid |= (phyreg[6] & 0x01) << 11; /* contender (phy dependent) */
298         /* lsid |= 1 << 11; *//* set contender (hack) */
299         lsid |= (phyreg[6] & 0x10) >> 3; /* initiated reset */
300
301         for (i = 0; i < (phyreg[2] & 0xf); i++) { /* ports */
302                 if (phyreg[3 + i] & 0x4) {
303                         lsid |= (((phyreg[3 + i] & 0x8) | 0x10) >> 3)
304                                 << (6 - i*2);
305                 } else {
306                         lsid |= 1 << (6 - i*2);
307                 }
308         }
309
310         cpu_to_be32s(&lsid);
311         PRINT(KERN_DEBUG, lynx->id, "generated own selfid 0x%x", lsid);
312         return lsid;
313 }
314
315 static void handle_selfid(struct ti_lynx *lynx, struct hpsb_host *host)
316 {
317         quadlet_t *q = lynx->rcv_page;
318         int phyid, isroot, size;
319         quadlet_t lsid = 0;
320         int i;
321
322         if (lynx->phy_reg0 == -1 || lynx->selfid_size == -1) return;
323
324         size = lynx->selfid_size;
325         phyid = lynx->phy_reg0;
326
327         i = (size > 16 ? 16 : size) / 4 - 1;
328         while (i >= 0) {
329                 cpu_to_be32s(&q[i]);
330                 i--;
331         }
332         
333         if (!lynx->phyic.reg_1394a) {
334                 lsid = generate_own_selfid(lynx, host);
335         }
336
337         isroot = (phyid & 2) != 0;
338         phyid >>= 2;
339         PRINT(KERN_INFO, lynx->id, "SelfID process finished (phyid %d, %s)",
340               phyid, (isroot ? "root" : "not root"));
341         reg_write(lynx, LINK_ID, (0xffc0 | phyid) << 16);
342
343         if (!lynx->phyic.reg_1394a && !size) {
344                 hpsb_selfid_received(host, lsid);
345         }
346
347         while (size > 0) {
348                 struct selfid *sid = (struct selfid *)q;
349
350                 if (!lynx->phyic.reg_1394a && !sid->extended 
351                     && (sid->phy_id == (phyid + 1))) {
352                         hpsb_selfid_received(host, lsid);
353                 }
354
355                 if (q[0] == ~q[1]) {
356                         PRINT(KERN_DEBUG, lynx->id, "SelfID packet 0x%x rcvd",
357                               q[0]);
358                         hpsb_selfid_received(host, q[0]);
359                 } else {
360                         PRINT(KERN_INFO, lynx->id,
361                               "inconsistent selfid 0x%x/0x%x", q[0], q[1]);
362                 }
363                 q += 2;
364                 size -= 8;
365         }
366
367         if (!lynx->phyic.reg_1394a && isroot && phyid != 0) {
368                 hpsb_selfid_received(host, lsid);
369         }
370
371         hpsb_selfid_complete(host, phyid, isroot);
372
373         if (host->in_bus_reset) return; /* in bus reset again */
374
375         if (isroot) reg_set_bits(lynx, LINK_CONTROL, LINK_CONTROL_CYCMASTER);
376         reg_set_bits(lynx, LINK_CONTROL,
377                      LINK_CONTROL_RCV_CMP_VALID | LINK_CONTROL_TX_ASYNC_EN
378                      | LINK_CONTROL_RX_ASYNC_EN | LINK_CONTROL_CYCTIMEREN);
379 }
380
381
382
383 /* This must be called with the respective queue_lock held. */
384 static void send_next(struct ti_lynx *lynx, int what)
385 {
386         struct ti_pcl pcl;
387         struct lynx_send_data *d;
388         struct hpsb_packet *packet;
389
390         d = (what == hpsb_iso ? &lynx->iso_send : &lynx->async);
391         packet = d->queue;
392
393         d->header_dma = pci_map_single(lynx->dev, packet->header,
394                                        packet->header_size, PCI_DMA_TODEVICE);
395         if (packet->data_size) {
396                 d->data_dma = pci_map_single(lynx->dev, packet->data,
397                                              packet->data_size,
398                                              PCI_DMA_TODEVICE);
399         } else {
400                 d->data_dma = 0;
401         }
402
403         pcl.next = PCL_NEXT_INVALID;
404         pcl.async_error_next = PCL_NEXT_INVALID;
405 #ifdef __BIG_ENDIAN
406         pcl.buffer[0].control = packet->speed_code << 14 | packet->header_size;
407 #else
408         pcl.buffer[0].control = packet->speed_code << 14 | packet->header_size 
409                 | PCL_BIGENDIAN;
410 #endif
411         pcl.buffer[0].pointer = d->header_dma;
412         pcl.buffer[1].control = PCL_LAST_BUFF | packet->data_size;
413         pcl.buffer[1].pointer = d->data_dma;
414
415         switch (packet->type) {
416         case hpsb_async:
417                 pcl.buffer[0].control |= PCL_CMD_XMT;
418                 break;
419         case hpsb_iso:
420                 pcl.buffer[0].control |= PCL_CMD_XMT | PCL_ISOMODE;
421                 break;
422         case hpsb_raw:
423                 pcl.buffer[0].control |= PCL_CMD_UNFXMT;
424                 break;
425         }                
426
427         if (!packet->data_be) {
428                 pcl.buffer[1].control |= PCL_BIGENDIAN;
429         }
430
431         put_pcl(lynx, d->pcl, &pcl);
432         run_pcl(lynx, d->pcl_start, d->channel);
433 }
434
435
436 /* called from subsystem core */
437 static int lynx_transmit(struct hpsb_host *host, struct hpsb_packet *packet)
438 {
439         struct ti_lynx *lynx = host->hostdata;
440         struct lynx_send_data *d;
441         unsigned long flags;
442
443         if (packet->data_size >= 4096) {
444                 PRINT(KERN_ERR, lynx->id, "transmit packet data too big (%Zd)",
445                       packet->data_size);
446                 return 0;
447         }
448
449         switch (packet->type) {
450         case hpsb_async:
451         case hpsb_raw:
452                 d = &lynx->async;
453                 break;
454         case hpsb_iso:
455                 d = &lynx->iso_send;
456                 break;
457         default:
458                 PRINT(KERN_ERR, lynx->id, "invalid packet type %d",
459                       packet->type);
460                 return 0;
461         }
462
463         packet->xnext = NULL;
464         if (packet->tcode == TCODE_WRITEQ
465             || packet->tcode == TCODE_READQ_RESPONSE) {
466                 cpu_to_be32s(&packet->header[3]);
467         }
468
469         spin_lock_irqsave(&d->queue_lock, flags);
470
471         if (d->queue == NULL) {
472                 d->queue = packet;
473                 d->queue_last = packet;
474                 send_next(lynx, packet->type);
475         } else {
476                 d->queue_last->xnext = packet;
477                 d->queue_last = packet;
478         }
479
480         spin_unlock_irqrestore(&d->queue_lock, flags);
481
482         return 1;
483 }
484
485
486 /* called from subsystem core */
487 static int lynx_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
488 {
489         struct ti_lynx *lynx = host->hostdata;
490         int retval = 0;
491         struct hpsb_packet *packet, *lastpacket;
492         unsigned long flags;
493
494         switch (cmd) {
495         case RESET_BUS:
496                 if (reg_read(lynx, LINK_INT_STATUS) & LINK_INT_PHY_BUSRESET) {
497                         retval = 0;
498                         break;
499                 }
500
501                 if (arg) {
502                         arg = 3 << 6;
503                 } else {
504                         arg = 1 << 6;
505                 }
506
507                 retval = get_phy_reg(lynx, 1);
508                 arg |= (retval == -1 ? 63 : retval);
509                 retval = 0;
510
511                 PRINT(KERN_INFO, lynx->id, "resetting bus on request");
512
513                 lynx->selfid_size = -1;
514                 lynx->phy_reg0 = -1;
515                 set_phy_reg(lynx, 1, arg);
516                 break;
517
518         case GET_CYCLE_COUNTER:
519                 retval = reg_read(lynx, CYCLE_TIMER);
520                 break;
521                 
522         case SET_CYCLE_COUNTER:
523                 reg_write(lynx, CYCLE_TIMER, arg);
524                 break;
525
526         case SET_BUS_ID:
527                 reg_write(lynx, LINK_ID, 
528                           (arg << 22) | (reg_read(lynx, LINK_ID) & 0x003f0000));
529                 break;
530                 
531         case ACT_CYCLE_MASTER:
532                 if (arg) {
533                         reg_set_bits(lynx, LINK_CONTROL,
534                                      LINK_CONTROL_CYCMASTER);
535                 } else {
536                         reg_clear_bits(lynx, LINK_CONTROL,
537                                        LINK_CONTROL_CYCMASTER);
538                 }
539                 break;
540
541         case CANCEL_REQUESTS:
542                 spin_lock_irqsave(&lynx->async.queue_lock, flags);
543
544                 reg_write(lynx, DMA_CHAN_CTRL(CHANNEL_ASYNC_SEND), 0);
545                 packet = lynx->async.queue;
546                 lynx->async.queue = NULL;
547
548                 spin_unlock_irqrestore(&lynx->async.queue_lock, flags);
549
550                 while (packet != NULL) {
551                         lastpacket = packet;
552                         packet = packet->xnext;
553                         hpsb_packet_sent(host, lastpacket, ACKX_ABORTED);
554                 }
555
556                 break;
557
558         case MODIFY_USAGE:
559                 if (arg) {
560                         MOD_INC_USE_COUNT;
561                 } else {
562                         MOD_DEC_USE_COUNT;
563                 }
564
565                 retval = 1;
566                 break;
567
568         case ISO_LISTEN_CHANNEL:
569                 spin_lock_irqsave(&lynx->iso_rcv.lock, flags);
570                 
571                 if (lynx->iso_rcv.chan_count++ == 0) {
572                         reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV),
573                                   DMA_WORD1_CMP_ENABLE_MASTER);
574                 }
575
576                 spin_unlock_irqrestore(&lynx->iso_rcv.lock, flags);
577                 break;
578
579         case ISO_UNLISTEN_CHANNEL:
580                 spin_lock_irqsave(&lynx->iso_rcv.lock, flags);
581
582                 if (--lynx->iso_rcv.chan_count == 0) {
583                         reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV),
584                                   0);
585                 }
586
587                 spin_unlock_irqrestore(&lynx->iso_rcv.lock, flags);
588                 break;
589
590         default:
591                 PRINT(KERN_ERR, lynx->id, "unknown devctl command %d", cmd);
592                 retval = -1;
593         }
594
595         return retval;
596 }
597
598
599 /***************************************
600  * IEEE-1394 functionality section END *
601  ***************************************/
602
603 #ifdef CONFIG_IEEE1394_PCILYNX_PORTS
604 /* VFS functions for local bus / aux device access.  Access to these
605  * is implemented as character devices instead of block devices
606  * because buffering is not wanted for this.  Therefore llseek (from
607  * VFS) can be used for these char devices with obvious effects.
608  */
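/*
 * Illustrative userspace use of one of these char devices.  The device
 * node name below is an assumption for the example only; this driver
 * defines just the char major and the minor number layout:
 *
 *	int fd = open("/dev/pcilynx_aux0", O_RDWR);
 *	lseek(fd, 0x100, SEEK_SET);          seek within the aux space
 *	read(fd, buf, sizeof(buf));          read starting at offset 0x100
 *	close(fd);
 */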
609 static int mem_open(struct inode*, struct file*);
610 static int mem_release(struct inode*, struct file*);
611 static unsigned int aux_poll(struct file*, struct poll_table_struct*);
612 static loff_t mem_llseek(struct file*, loff_t, int);
613 static ssize_t mem_read (struct file*, char*, size_t, loff_t*);
614 static ssize_t mem_write(struct file*, const char*, size_t, loff_t*);
615
616
617 static struct file_operations aux_ops = {
618         OWNER_THIS_MODULE
619         read:           mem_read,
620         write:          mem_write,
621         poll:           aux_poll,
622         llseek:         mem_llseek,
623         open:           mem_open,
624         release:        mem_release,
625 };
626
627
628 static void aux_setup_pcls(struct ti_lynx *lynx)
629 {
630         struct ti_pcl pcl;
631
632         pcl.next = PCL_NEXT_INVALID;
633         pcl.user_data = pcl_bus(lynx, lynx->dmem_pcl);
634         put_pcl(lynx, lynx->dmem_pcl, &pcl);
635 }
636
637 static int mem_open(struct inode *inode, struct file *file)
638 {
639         int cid = MINOR(inode->i_rdev);
640         enum { t_rom, t_aux, t_ram } type;
641         struct memdata *md;
642         
643         V22_COMPAT_MOD_INC_USE_COUNT;
644
645         if (cid < PCILYNX_MINOR_AUX_START) {
646                 /* just for completeness */
647                 V22_COMPAT_MOD_DEC_USE_COUNT;
648                 return -ENXIO;
649         } else if (cid < PCILYNX_MINOR_ROM_START) {
650                 cid -= PCILYNX_MINOR_AUX_START;
651                 if (cid >= num_of_cards || !cards[cid].aux_port) {
652                         V22_COMPAT_MOD_DEC_USE_COUNT;
653                         return -ENXIO;
654                 }
655                 type = t_aux;
656         } else if (cid < PCILYNX_MINOR_RAM_START) {
657                 cid -= PCILYNX_MINOR_ROM_START;
658                 if (cid >= num_of_cards || !cards[cid].local_rom) {
659                         V22_COMPAT_MOD_DEC_USE_COUNT;
660                         return -ENXIO;
661                 }
662                 type = t_rom;
663         } else {
664                 /* WARNING: Know what you are doing when opening RAM.
665                  * It is currently used inside the driver! */
666                 cid -= PCILYNX_MINOR_RAM_START;
667                 if (cid >= num_of_cards || !cards[cid].local_ram) {
668                         V22_COMPAT_MOD_DEC_USE_COUNT;
669                         return -ENXIO;
670                 }
671                 type = t_ram;
672         }
673
674         md = (struct memdata *)kmalloc(sizeof(struct memdata), SLAB_KERNEL);
675         if (md == NULL) {
676                 V22_COMPAT_MOD_DEC_USE_COUNT;
677                 return -ENOMEM;
678         }
679
680         md->lynx = &cards[cid];
681         md->cid = cid;
682
683         switch (type) {
684         case t_rom:
685                 md->type = rom;
686                 break;
687         case t_ram:
688                 md->type = ram;
689                 break;
690         case t_aux:
691                 atomic_set(&md->aux_intr_last_seen,
692                            atomic_read(&cards[cid].aux_intr_seen));
693                 md->type = aux;
694                 break;
695         }
696
697         file->private_data = md;
698
699         return 0;
700 }
701
702 static int mem_release(struct inode *inode, struct file *file)
703 {
704         struct memdata *md = (struct memdata *)file->private_data;
705
706         kfree(md);
707
708         V22_COMPAT_MOD_DEC_USE_COUNT;
709         return 0;
710 }
711
712 static unsigned int aux_poll(struct file *file, poll_table *pt)
713 {
714         struct memdata *md = (struct memdata *)file->private_data;
715         int cid = md->cid;
716         unsigned int mask;
717
718         /* reading and writing is always allowed */
719         mask = POLLIN | POLLRDNORM | POLLOUT | POLLWRNORM;
720
721         if (md->type == aux) {
722                 poll_wait(file, &cards[cid].aux_intr_wait, pt);
723
724                 if (atomic_read(&md->aux_intr_last_seen)
725                     != atomic_read(&cards[cid].aux_intr_seen)) {
726                         mask |= POLLPRI;
727                         atomic_inc(&md->aux_intr_last_seen);
728                 }
729         }
730
731         return mask;
732 }
733
734 loff_t mem_llseek(struct file *file, loff_t offs, int orig)
735 {
736         loff_t newoffs = -1;
737
738         lock_kernel();
739         switch (orig) {
740         case 0:
741                 newoffs = offs;
742                 break;
743         case 1:
744                 newoffs = offs + file->f_pos;
745                 break;
746         case 2:
747                 newoffs = PCILYNX_MAX_MEMORY + 1 + offs;
748         }
749
750         if (newoffs < 0 || newoffs > PCILYNX_MAX_MEMORY + 1) {
751                 unlock_kernel();
752                 return -EINVAL;
753         }
754
755         unlock_kernel();
756         file->f_pos = newoffs;
757         return newoffs;
758 }
759
760 /* 
761  * do not DMA if count is too small, because that has a serious impact
762  * on performance - the value 2400 was found by experiment and may not work
763  * as well everywhere as it does here - use the mem_mindma module option to change it
764  */
765 short mem_mindma = 2400;
766 MODULE_PARM(mem_mindma, "h");
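/*
 * Example (assuming the module is built and loaded as "pcilynx"):
 *
 *	modprobe pcilynx mem_mindma=4096
 *
 * raises the threshold so that only reads of 4096 bytes or more take the
 * local bus DMA path implemented below.
 */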
767
768 static ssize_t mem_dmaread(struct memdata *md, u32 physbuf, ssize_t count,
769                            int offset)
770 {
771         pcltmp_t pcltmp;
772         struct ti_pcl *pcl;
773         size_t retval;
774         int i;
775         DECLARE_WAITQUEUE(wait, current);
776
777         count &= ~3;
778         count = MIN(count, 53196);
779         retval = count;
780
781         if (reg_read(md->lynx, DMA_CHAN_CTRL(CHANNEL_LOCALBUS))
782             & DMA_CHAN_CTRL_BUSY) {
783                 PRINT(KERN_WARNING, md->lynx->id, "DMA ALREADY ACTIVE!");
784         }
785
786         reg_write(md->lynx, LBUS_ADDR, md->type | offset);
787
788         pcl = edit_pcl(md->lynx, md->lynx->dmem_pcl, &pcltmp);
789         pcl->buffer[0].control = PCL_CMD_LBUS_TO_PCI | MIN(count, 4092);
790         pcl->buffer[0].pointer = physbuf;
791         count -= 4092;
792
793         i = 0;
794         while (count > 0) {
795                 i++;
796                 pcl->buffer[i].control = MIN(count, 4092);
797                 pcl->buffer[i].pointer = physbuf + i * 4092;
798                 count -= 4092;
799         }
800         pcl->buffer[i].control |= PCL_LAST_BUFF;
801         commit_pcl(md->lynx, md->lynx->dmem_pcl, &pcltmp);
802
803         set_current_state(TASK_INTERRUPTIBLE);
804         add_wait_queue(&md->lynx->mem_dma_intr_wait, &wait);
805         run_sub_pcl(md->lynx, md->lynx->dmem_pcl, 2, CHANNEL_LOCALBUS);
806
807         schedule();
808         while (reg_read(md->lynx, DMA_CHAN_CTRL(CHANNEL_LOCALBUS))
809                & DMA_CHAN_CTRL_BUSY) {
810                 if (signal_pending(current)) {
811                         retval = -EINTR;
812                         break;
813                 }
814                 schedule();
815         }
816
817         reg_write(md->lynx, DMA_CHAN_CTRL(CHANNEL_LOCALBUS), 0);
818         remove_wait_queue(&md->lynx->mem_dma_intr_wait, &wait);
819
820         if (reg_read(md->lynx, DMA_CHAN_CTRL(CHANNEL_LOCALBUS))
821             & DMA_CHAN_CTRL_BUSY) {
822                 PRINT(KERN_ERR, md->lynx->id, "DMA STILL ACTIVE!");
823         }
824
825         return retval;
826 }
827
828 static ssize_t mem_read(struct file *file, char *buffer, size_t count,
829                         loff_t *offset)
830 {
831         struct memdata *md = (struct memdata *)file->private_data;
832         ssize_t bcount;
833         size_t alignfix;
834         int off = (int)*offset; /* avoid useless 64bit-arithmetic */
835         ssize_t retval;
836         void *membase;
837
838         if ((off + count) > PCILYNX_MAX_MEMORY + 1) {
839                 count = PCILYNX_MAX_MEMORY + 1 - off;
840         }
841         if (count == 0) {
842                 return 0;
843         }
844
845
846         switch (md->type) {
847         case rom:
848                 membase = md->lynx->local_rom;
849                 break;
850         case ram:
851                 membase = md->lynx->local_ram;
852                 break;
853         case aux:
854                 membase = md->lynx->aux_port;
855                 break;
856         default:
857                 panic("pcilynx%d: unsupported md->type %d in %s",
858                       md->lynx->id, md->type, __FUNCTION__);
859         }
860
861         down(&md->lynx->mem_dma_mutex);
862
863         if (count < mem_mindma) {
864                 memcpy_fromio(md->lynx->mem_dma_buffer, membase+off, count);
865                 goto out;
866         }
867
868         bcount = count;
869         alignfix = 4 - (off % 4);
870         if (alignfix != 4) {
871                 if (bcount < alignfix) {
872                         alignfix = bcount;
873                 }
874                 memcpy_fromio(md->lynx->mem_dma_buffer, membase+off,
875                               alignfix);
876                 if (bcount == alignfix) {
877                         goto out;
878                 }
879                 bcount -= alignfix;
880                 off += alignfix;
881         }
882
883         while (bcount >= 4) {
884                 retval = mem_dmaread(md, md->lynx->mem_dma_buffer_dma
885                                      + count - bcount, bcount, off);
886                 if (retval < 0) { up(&md->lynx->mem_dma_mutex); return retval; }
887
888                 bcount -= retval;
889                 off += retval;
890         }
891
892         if (bcount) {
893                 memcpy_fromio(md->lynx->mem_dma_buffer + count - bcount,
894                               membase+off, bcount);
895         }
896
897  out:
898         retval = copy_to_user(buffer, md->lynx->mem_dma_buffer, count);
899         up(&md->lynx->mem_dma_mutex);
900
901         if (retval) return -EFAULT; /* copy_to_user returns the number of bytes not copied */
902         *offset += count;
903         return count;
904 }
905
906
907 static ssize_t mem_write(struct file *file, const char *buffer, size_t count, 
908                          loff_t *offset)
909 {
910         struct memdata *md = (struct memdata *)file->private_data;
911
912         if (((*offset) + count) > PCILYNX_MAX_MEMORY+1) {
913                 count = PCILYNX_MAX_MEMORY+1 - *offset;
914         }
915         if (count == 0 || *offset > PCILYNX_MAX_MEMORY) {
916                 return -ENOSPC;
917         }
918
919         /* FIXME: dereferencing pointers to PCI mem doesn't work everywhere */
920         switch (md->type) {
921         case aux:
922                 copy_from_user(md->lynx->aux_port+(*offset), buffer, count);
923                 break;
924         case ram:
925                 copy_from_user(md->lynx->local_ram+(*offset), buffer, count);
926                 break;
927         case rom:
928                 /* the ROM may be writeable */
929                 copy_from_user(md->lynx->local_rom+(*offset), buffer, count);
930                 break;
931         }
932
933         file->f_pos += count;
934         return count;
935 }
936 #endif /* CONFIG_IEEE1394_PCILYNX_PORTS */
937
938
939 /********************************************************
940  * Global stuff (interrupt handler, init/shutdown code) *
941  ********************************************************/
942
943
944 static void lynx_irq_handler(int irq, void *dev_id,
945                              struct pt_regs *regs_are_unused)
946 {
947         struct ti_lynx *lynx = (struct ti_lynx *)dev_id;
948         struct hpsb_host *host = lynx->host;
949         u32 intmask;
950         u32 linkint;
951
952         linkint = reg_read(lynx, LINK_INT_STATUS);
953         intmask = reg_read(lynx, PCI_INT_STATUS);
954
955         PRINTD(KERN_DEBUG, lynx->id, "interrupt: 0x%08x / 0x%08x", intmask,
956                linkint);
957
958         if (!(intmask & PCI_INT_INT_PEND)) return;
959
960         reg_write(lynx, LINK_INT_STATUS, linkint);
961         reg_write(lynx, PCI_INT_STATUS, intmask);
962
963 #ifdef CONFIG_IEEE1394_PCILYNX_PORTS
964         if (intmask & PCI_INT_AUX_INT) {
965                 atomic_inc(&lynx->aux_intr_seen);
966                 wake_up_interruptible(&lynx->aux_intr_wait);
967         }
968
969         if (intmask & PCI_INT_DMA_HLT(CHANNEL_LOCALBUS)) {
970                 wake_up_interruptible(&lynx->mem_dma_intr_wait);
971         }
972 #endif
973
974
975         if (intmask & PCI_INT_1394) {
976                 if (linkint & LINK_INT_PHY_TIMEOUT) {
977                         PRINT(KERN_INFO, lynx->id, "PHY timeout occurred");
978                 }
979                 if (linkint & LINK_INT_PHY_BUSRESET) {
980                         PRINT(KERN_INFO, lynx->id, "bus reset interrupt");
981                         lynx->selfid_size = -1;
982                         lynx->phy_reg0 = -1;
983                         if (!host->in_bus_reset)
984                                 hpsb_bus_reset(host);
985                 }
986                 if (linkint & LINK_INT_PHY_REG_RCVD) {
987                         u32 reg;
988
989                         spin_lock(&lynx->phy_reg_lock);
990                         reg = reg_read(lynx, LINK_PHY);
991                         spin_unlock(&lynx->phy_reg_lock);
992
993                         if (!host->in_bus_reset) {
994                                 PRINT(KERN_INFO, lynx->id,
995                                       "phy reg received without reset");
996                         } else if (reg & 0xf00) {
997                                 PRINT(KERN_INFO, lynx->id,
998                                       "unsolicited phy reg %d received",
999                                       (reg >> 8) & 0xf);
1000                         } else {
1001                                 lynx->phy_reg0 = reg & 0xff;
1002                                 handle_selfid(lynx, host);
1003                         }
1004                 }
1005                 if (linkint & LINK_INT_ISO_STUCK) {
1006                         PRINT(KERN_INFO, lynx->id, "isochronous transmitter stuck");
1007                 }
1008                 if (linkint & LINK_INT_ASYNC_STUCK) {
1009                         PRINT(KERN_INFO, lynx->id, "asynchronous transmitter stuck");
1010                 }
1011                 if (linkint & LINK_INT_SENT_REJECT) {
1012                         PRINT(KERN_INFO, lynx->id, "sent reject");
1013                 }
1014                 if (linkint & LINK_INT_TX_INVALID_TC) {
1015                         PRINT(KERN_INFO, lynx->id, "invalid transaction code");
1016                 }
1017                 if (linkint & LINK_INT_GRF_OVERFLOW) {
1018                         /* flush FIFO if overflow happens during reset */
1019                         if (host->in_bus_reset)
1020                                 reg_write(lynx, FIFO_CONTROL,
1021                                           FIFO_CONTROL_GRF_FLUSH);
1022                         PRINT(KERN_INFO, lynx->id, "GRF overflow");
1023                 }
1024                 if (linkint & LINK_INT_ITF_UNDERFLOW) {
1025                         PRINT(KERN_INFO, lynx->id, "ITF underflow");
1026                 }
1027                 if (linkint & LINK_INT_ATF_UNDERFLOW) {
1028                         PRINT(KERN_INFO, lynx->id, "ATF underflow");
1029                 }
1030         }
1031
1032         if (intmask & PCI_INT_DMA_HLT(CHANNEL_ISO_RCV)) {
1033                 PRINTD(KERN_DEBUG, lynx->id, "iso receive");
1034
1035                 spin_lock(&lynx->iso_rcv.lock);
1036
1037                 lynx->iso_rcv.stat[lynx->iso_rcv.next] =
1038                         reg_read(lynx, DMA_CHAN_STAT(CHANNEL_ISO_RCV));
1039
1040                 lynx->iso_rcv.used++;
1041                 lynx->iso_rcv.next = (lynx->iso_rcv.next + 1) % NUM_ISORCV_PCL;
1042
1043                 if ((lynx->iso_rcv.next == lynx->iso_rcv.last)
1044                     || !lynx->iso_rcv.chan_count) {
1045                         PRINTD(KERN_DEBUG, lynx->id, "stopped");
1046                         reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV), 0);
1047                 }
1048
1049                 run_sub_pcl(lynx, lynx->iso_rcv.pcl_start, lynx->iso_rcv.next,
1050                             CHANNEL_ISO_RCV);
1051
1052                 spin_unlock(&lynx->iso_rcv.lock);
1053
1054                 tasklet_schedule(&lynx->iso_rcv.tq);
1055         }
1056
1057         if (intmask & PCI_INT_DMA_HLT(CHANNEL_ASYNC_SEND)) {
1058                 u32 ack;
1059                 struct hpsb_packet *packet;
1060                 
1061                 spin_lock(&lynx->async.queue_lock);
1062
1063                 ack = reg_read(lynx, DMA_CHAN_STAT(CHANNEL_ASYNC_SEND));
1064                 packet = lynx->async.queue;
1065                 lynx->async.queue = packet->xnext;
1066
1067                 pci_unmap_single(lynx->dev, lynx->async.header_dma,
1068                                  packet->header_size, PCI_DMA_TODEVICE);
1069                 if (packet->data_size) {
1070                         pci_unmap_single(lynx->dev, lynx->async.data_dma,
1071                                          packet->data_size, PCI_DMA_TODEVICE);
1072                 }
1073
1074                 if (lynx->async.queue != NULL) {
1075                         send_next(lynx, hpsb_async);
1076                 }
1077
1078                 spin_unlock(&lynx->async.queue_lock);
1079
1080                 if (ack & DMA_CHAN_STAT_SPECIALACK) {
1081                         ack = (ack >> 15) & 0xf;
1082                         PRINTD(KERN_INFO, lynx->id, "special ack %d", ack);
1083                         ack = (ack == 1 ? ACKX_TIMEOUT : ACKX_SEND_ERROR);
1084                 } else {
1085                         ack = (ack >> 15) & 0xf;
1086                 }
1087                 
1088                 hpsb_packet_sent(host, packet, ack);
1089         }
1090
1091         if (intmask & PCI_INT_DMA_HLT(CHANNEL_ISO_SEND)) {
1092                 struct hpsb_packet *packet;
1093
1094                 spin_lock(&lynx->iso_send.queue_lock);
1095
1096                 packet = lynx->iso_send.queue;
1097                 lynx->iso_send.queue = packet->xnext;
1098
1099                 pci_unmap_single(lynx->dev, lynx->iso_send.header_dma,
1100                                  packet->header_size, PCI_DMA_TODEVICE);
1101                 if (packet->data_size) {
1102                         pci_unmap_single(lynx->dev, lynx->iso_send.data_dma,
1103                                          packet->data_size, PCI_DMA_TODEVICE);
1104                 }
1105
1106                 if (lynx->iso_send.queue != NULL) {
1107                         send_next(lynx, hpsb_iso);
1108                 }
1109
1110                 spin_unlock(&lynx->iso_send.queue_lock);
1111
1112                 hpsb_packet_sent(host, packet, ACK_COMPLETE);
1113         }
1114
1115         if (intmask & PCI_INT_DMA_HLT(CHANNEL_ASYNC_RCV)) {
1116                 /* general receive DMA completed */
1117                 int stat = reg_read(lynx, DMA_CHAN_STAT(CHANNEL_ASYNC_RCV));
1118
1119                 PRINTD(KERN_DEBUG, lynx->id, "received packet size %d",
1120                        stat & 0x1fff); 
1121
1122                 if (stat & DMA_CHAN_STAT_SELFID) {
1123                         lynx->selfid_size = stat & 0x1fff;
1124                         handle_selfid(lynx, host);
1125                 } else {
1126                         quadlet_t *q_data = lynx->rcv_page;
1127                         if ((*q_data >> 4 & 0xf) == TCODE_READQ_RESPONSE
1128                             || (*q_data >> 4 & 0xf) == TCODE_WRITEQ) {
1129                                 cpu_to_be32s(q_data + 3);
1130                         }
1131                         hpsb_packet_received(host, q_data, stat & 0x1fff, 0);
1132                 }
1133
1134                 run_pcl(lynx, lynx->rcv_pcl_start, CHANNEL_ASYNC_RCV);
1135         }
1136 }
1137
1138
1139 static void iso_rcv_bh(struct ti_lynx *lynx)
1140 {
1141         unsigned int idx;
1142         quadlet_t *data;
1143         unsigned long flags;
1144
1145         spin_lock_irqsave(&lynx->iso_rcv.lock, flags);
1146
1147         while (lynx->iso_rcv.used) {
1148                 idx = lynx->iso_rcv.last;
1149                 spin_unlock_irqrestore(&lynx->iso_rcv.lock, flags);
1150
1151                 data = lynx->iso_rcv.page[idx / ISORCV_PER_PAGE]
1152                         + (idx % ISORCV_PER_PAGE) * MAX_ISORCV_SIZE;
1153
1154                 if ((*data >> 16) + 4 != (lynx->iso_rcv.stat[idx] & 0x1fff)) {
1155                         PRINT(KERN_ERR, lynx->id,
1156                               "iso length mismatch 0x%08x/0x%08x", *data,
1157                               lynx->iso_rcv.stat[idx]);
1158                 }
1159
1160                 if (lynx->iso_rcv.stat[idx] 
1161                     & (DMA_CHAN_STAT_PCIERR | DMA_CHAN_STAT_PKTERR)) {
1162                         PRINT(KERN_INFO, lynx->id,
1163                               "iso receive error on %d to 0x%p", idx, data);
1164                 } else {
1165                         hpsb_packet_received(lynx->host, data,
1166                                              lynx->iso_rcv.stat[idx] & 0x1fff,
1167                                              0);
1168                 }
1169
1170                 spin_lock_irqsave(&lynx->iso_rcv.lock, flags);
1171                 lynx->iso_rcv.last = (idx + 1) % NUM_ISORCV_PCL;
1172                 lynx->iso_rcv.used--;
1173         }
1174
1175         if (lynx->iso_rcv.chan_count) {
1176                 reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV),
1177                           DMA_WORD1_CMP_ENABLE_MASTER);
1178         }
1179         spin_unlock_irqrestore(&lynx->iso_rcv.lock, flags);
1180 }
1181
1182
1183 static void remove_card(struct pci_dev *dev)
1184 {
1185         struct ti_lynx *lynx;
1186         int i;
1187
1188         lynx = pci_get_drvdata(dev);
1189         if (!lynx) return;
1190         pci_set_drvdata(dev, NULL);
1191
1192         switch (lynx->state) {
1193         case is_host:
1194                 reg_write(lynx, PCI_INT_ENABLE, 0);
1195                 hpsb_remove_host(lynx->host);
1196         case have_intr:
1197                 reg_write(lynx, PCI_INT_ENABLE, 0);
1198                 free_irq(lynx->dev->irq, lynx);
1199         case have_iomappings:
1200                 reg_set_bits(lynx, MISC_CONTROL, MISC_CONTROL_SWRESET);
1201                 /* Fix buggy cards with autoboot pin not tied low: */
1202                 reg_write(lynx, DMA0_CHAN_CTRL, 0);
1203                 iounmap(lynx->registers);
1204                 iounmap(lynx->local_rom);
1205                 iounmap(lynx->local_ram);
1206                 iounmap(lynx->aux_port);
1207         case have_1394_buffers:
1208                 for (i = 0; i < ISORCV_PAGES; i++) {
1209                         if (lynx->iso_rcv.page[i]) {
1210                                 pci_free_consistent(lynx->dev, PAGE_SIZE,
1211                                                     lynx->iso_rcv.page[i],
1212                                                     lynx->iso_rcv.page_dma[i]);
1213                         }
1214                 }
1215                 pci_free_consistent(lynx->dev, PAGE_SIZE, lynx->rcv_page,
1216                                     lynx->rcv_page_dma);
1217         case have_aux_buf:
1218 #ifdef CONFIG_IEEE1394_PCILYNX_PORTS
1219                 pci_free_consistent(lynx->dev, 65536, lynx->mem_dma_buffer,
1220                                     lynx->mem_dma_buffer_dma);
1221 #endif
1222         case have_pcl_mem:
1223 #ifndef CONFIG_IEEE1394_PCILYNX_LOCALRAM
1224                 pci_free_consistent(lynx->dev, LOCALRAM_SIZE, lynx->pcl_mem,
1225                                     lynx->pcl_mem_dma);
1226 #endif
1227         case clear:
1228                 /* do nothing - already freed */
1229                 ;
1230         }
1231
1232         tasklet_kill(&lynx->iso_rcv.tq);
1233         kfree(lynx);
1234 }
1235
1236
1237 static int __devinit add_card(struct pci_dev *dev,
1238                               const struct pci_device_id *devid_is_unused)
1239 {
1240 #define FAIL(fmt, args...) do { \
1241         PRINT_G(KERN_ERR, fmt , ## args); \
1242         remove_card(dev); \
1243         return error; \
1244         } while (0)
1245
1246         struct hpsb_host *host;
1247         struct ti_lynx *lynx; /* shortcut to currently handled device */
1248         struct ti_pcl pcl;
1249         u32 *pcli;
1250         int i;
1251         int error;
1252
1253
1254         error = -ENXIO;
1255
1256         if (pci_set_dma_mask(dev, 0xffffffff))
1257                 FAIL("DMA address limits not supported for PCILynx hardware");
1258         if (pci_enable_device(dev))
1259                 FAIL("failed to enable PCILynx hardware");
1260         pci_set_master(dev);
1261
1262         error = -ENOMEM;
1263
1264         host = hpsb_alloc_host(lynx_driver, sizeof(struct ti_lynx));
1265         if (!host) FAIL("failed to allocate control structure memory");
1266
1267         lynx = host->hostdata;
1268         lynx->id = card_id++;
1269         lynx->dev = dev;
1270         lynx->state = clear;
1271         lynx->host = host;
1272         host->pdev = dev;
1273         pci_set_drvdata(dev, lynx);
1274
1275         lynx->lock = SPIN_LOCK_UNLOCKED;
1276         lynx->phy_reg_lock = SPIN_LOCK_UNLOCKED;
1277
1278 #ifndef CONFIG_IEEE1394_PCILYNX_LOCALRAM
1279         lynx->pcl_mem = pci_alloc_consistent(dev, LOCALRAM_SIZE,
1280                                              &lynx->pcl_mem_dma);
1281
1282         if (lynx->pcl_mem != NULL) {
1283                 lynx->state = have_pcl_mem;
1284                 PRINT(KERN_INFO, lynx->id, 
1285                       "allocated PCL memory %d Bytes @ 0x%p", LOCALRAM_SIZE,
1286                       lynx->pcl_mem);
1287         } else {
1288                 FAIL("failed to allocate PCL memory area");
1289         }
1290 #endif
1291
1292 #ifdef CONFIG_IEEE1394_PCILYNX_PORTS
1293         lynx->mem_dma_buffer = pci_alloc_consistent(dev, 65536,
1294                                                     &lynx->mem_dma_buffer_dma);
1295         if (lynx->mem_dma_buffer == NULL) {
1296                 FAIL("failed to allocate DMA buffer for aux");
1297         }
1298         lynx->state = have_aux_buf;
1299 #endif
1300
1301         lynx->rcv_page = pci_alloc_consistent(dev, PAGE_SIZE,
1302                                               &lynx->rcv_page_dma);
1303         if (lynx->rcv_page == NULL) {
1304                 FAIL("failed to allocate receive buffer");
1305         }
1306         lynx->state = have_1394_buffers;
1307
1308         for (i = 0; i < ISORCV_PAGES; i++) {
1309                 lynx->iso_rcv.page[i] =
1310                         pci_alloc_consistent(dev, PAGE_SIZE,
1311                                              &lynx->iso_rcv.page_dma[i]);
1312                 if (lynx->iso_rcv.page[i] == NULL) {
1313                         FAIL("failed to allocate iso receive buffers");
1314                 }
1315         }
1316
1317         lynx->registers = ioremap_nocache(pci_resource_start(dev,0),
1318                                           PCILYNX_MAX_REGISTER);
1319         lynx->local_ram = ioremap(pci_resource_start(dev,1), PCILYNX_MAX_MEMORY);
1320         lynx->aux_port  = ioremap(pci_resource_start(dev,2), PCILYNX_MAX_MEMORY);
1321         lynx->local_rom = ioremap(pci_resource_start(dev,PCI_ROM_RESOURCE),
1322                                   PCILYNX_MAX_MEMORY);
1323         lynx->state = have_iomappings;
1324
1325         if (lynx->registers == NULL) {
1326                 FAIL("failed to remap registers - card not accessible");
1327         }
1328
1329 #ifdef CONFIG_IEEE1394_PCILYNX_LOCALRAM
1330         if (lynx->local_ram == NULL) {
1331                 FAIL("failed to remap local RAM which is required for "
1332                      "operation");
1333         }
1334 #endif
1335
1336         reg_set_bits(lynx, MISC_CONTROL, MISC_CONTROL_SWRESET);
1337         /* Fix buggy cards with autoboot pin not tied low: */
1338         reg_write(lynx, DMA0_CHAN_CTRL, 0);
1339
1340         if (!request_irq(dev->irq, lynx_irq_handler, SA_SHIRQ,
1341                          PCILYNX_DRIVER_NAME, lynx)) {
1342                 PRINT(KERN_INFO, lynx->id, "allocated interrupt %d", dev->irq);
1343                 lynx->state = have_intr;
1344         } else {
1345                 FAIL("failed to allocate shared interrupt %d", dev->irq);
1346         }
1347
1348         /* alloc_pcl return values are not checked; it is expected that the
1349          * provided PCL space is sufficient for the initial allocations */
1350 #ifdef CONFIG_IEEE1394_PCILYNX_PORTS
1351         if (lynx->aux_port != NULL) {
1352                 lynx->dmem_pcl = alloc_pcl(lynx);
1353                 aux_setup_pcls(lynx);
1354                 sema_init(&lynx->mem_dma_mutex, 1);
1355         }
1356 #endif
1357         lynx->rcv_pcl = alloc_pcl(lynx);
1358         lynx->rcv_pcl_start = alloc_pcl(lynx);
1359         lynx->async.pcl = alloc_pcl(lynx);
1360         lynx->async.pcl_start = alloc_pcl(lynx);
1361         lynx->iso_send.pcl = alloc_pcl(lynx);
1362         lynx->iso_send.pcl_start = alloc_pcl(lynx);
1363
1364         for (i = 0; i < NUM_ISORCV_PCL; i++) {
1365                 lynx->iso_rcv.pcl[i] = alloc_pcl(lynx);
1366         }
1367         lynx->iso_rcv.pcl_start = alloc_pcl(lynx);
1368
1369         /* all allocations successful - simple init stuff follows */
1370
1371         reg_write(lynx, PCI_INT_ENABLE, PCI_INT_DMA_ALL);
1372
1373 #ifdef CONFIG_IEEE1394_PCILYNX_PORTS
1374         reg_set_bits(lynx, PCI_INT_ENABLE, PCI_INT_AUX_INT);
1375         init_waitqueue_head(&lynx->mem_dma_intr_wait);
1376         init_waitqueue_head(&lynx->aux_intr_wait);
1377 #endif
1378
1379         tasklet_init(&lynx->iso_rcv.tq, (void (*)(unsigned long))iso_rcv_bh,
1380                      (unsigned long)lynx);
1381
1382         lynx->iso_rcv.lock = SPIN_LOCK_UNLOCKED;
1383
1384         lynx->async.queue_lock = SPIN_LOCK_UNLOCKED;
1385         lynx->async.channel = CHANNEL_ASYNC_SEND;
1386         lynx->iso_send.queue_lock = SPIN_LOCK_UNLOCKED;
1387         lynx->iso_send.channel = CHANNEL_ISO_SEND;
1388         
1389         PRINT(KERN_INFO, lynx->id, "remapped memory spaces reg 0x%p, rom 0x%p, "
1390               "ram 0x%p, aux 0x%p", lynx->registers, lynx->local_rom,
1391               lynx->local_ram, lynx->aux_port);
1392
1393         /* now, looking for PHY register set */
1394         if ((get_phy_reg(lynx, 2) & 0xe0) == 0xe0) {
1395                 lynx->phyic.reg_1394a = 1;
1396                 PRINT(KERN_INFO, lynx->id,
1397                       "found 1394a conform PHY (using extended register set)");
1398                 lynx->phyic.vendor = get_phy_vendorid(lynx);
1399                 lynx->phyic.product = get_phy_productid(lynx);
1400         } else {
1401                 lynx->phyic.reg_1394a = 0;
1402                 PRINT(KERN_INFO, lynx->id, "found old 1394 PHY");
1403         }
1404
1405         lynx->selfid_size = -1;
1406         lynx->phy_reg0 = -1;
1407
1408         lynx->async.queue = NULL;
1409
1410         pcl.next = pcl_bus(lynx, lynx->rcv_pcl);
1411         put_pcl(lynx, lynx->rcv_pcl_start, &pcl);
1412
1413         pcl.next = PCL_NEXT_INVALID;
1414         pcl.async_error_next = PCL_NEXT_INVALID;
1415 #ifdef __BIG_ENDIAN
1416         pcl.buffer[0].control = PCL_CMD_RCV | 16;
1417         pcl.buffer[1].control = PCL_LAST_BUFF | 4080;
1418 #else
1419         pcl.buffer[0].control = PCL_CMD_RCV | PCL_BIGENDIAN | 16;
1420         pcl.buffer[1].control = PCL_LAST_BUFF | 4080;
1421 #endif
1422         pcl.buffer[0].pointer = lynx->rcv_page_dma;
1423         pcl.buffer[1].pointer = lynx->rcv_page_dma + 16;
1424         put_pcl(lynx, lynx->rcv_pcl, &pcl);
1425         
1426         pcl.next = pcl_bus(lynx, lynx->async.pcl);
1427         pcl.async_error_next = pcl_bus(lynx, lynx->async.pcl);
1428         put_pcl(lynx, lynx->async.pcl_start, &pcl);
1429
1430         pcl.next = pcl_bus(lynx, lynx->iso_send.pcl);
1431         pcl.async_error_next = PCL_NEXT_INVALID;
1432         put_pcl(lynx, lynx->iso_send.pcl_start, &pcl);
1433
1434         pcl.next = PCL_NEXT_INVALID;
1435         pcl.async_error_next = PCL_NEXT_INVALID;
1436         pcl.buffer[0].control = PCL_CMD_RCV | 4;
1437 #ifndef __BIG_ENDIAN
1438         pcl.buffer[0].control |= PCL_BIGENDIAN;
1439 #endif
1440         pcl.buffer[1].control = PCL_LAST_BUFF | 2044;
1441
1442         for (i = 0; i < NUM_ISORCV_PCL; i++) {
1443                 int page = i / ISORCV_PER_PAGE;
1444                 int sec = i % ISORCV_PER_PAGE;
1445
1446                 pcl.buffer[0].pointer = lynx->iso_rcv.page_dma[page] 
1447                         + sec * MAX_ISORCV_SIZE;
1448                 pcl.buffer[1].pointer = pcl.buffer[0].pointer + 4;
1449                 put_pcl(lynx, lynx->iso_rcv.pcl[i], &pcl);
1450         }
1451
1452         pcli = (u32 *)&pcl;
1453         for (i = 0; i < NUM_ISORCV_PCL; i++) {
1454                 pcli[i] = pcl_bus(lynx, lynx->iso_rcv.pcl[i]);
1455         }
1456         put_pcl(lynx, lynx->iso_rcv.pcl_start, &pcl);
1457
1458         /* FIFO sizes from left to right: ITF=48 ATF=48 GRF=160 */
1459         reg_write(lynx, FIFO_SIZES, 0x003030a0);
1460         /* 20 byte threshold before triggering PCI transfer */
1461         reg_write(lynx, DMA_GLOBAL_REGISTER, 0x2<<24);
1462         /* threshold on both send FIFOs before transmitting:
1463            FIFO size - cache line size - 1 */
1464         i = reg_read(lynx, PCI_LATENCY_CACHELINE) & 0xff;
1465         i = 0x30 - i - 1;
1466         reg_write(lynx, FIFO_XMIT_THRESHOLD, (i << 8) | i);
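        /*
         * Worked example of the calculation above (illustrative values):
         * a PCI cache line size register of 8 (32-byte cache lines) gives
         * i = 0x30 - 8 - 1 = 0x27, so FIFO_XMIT_THRESHOLD is written as
         * 0x2727 (the same threshold for both the ITF and the ATF).
         */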
1467
1468         reg_set_bits(lynx, PCI_INT_ENABLE, PCI_INT_1394);
1469
1470         reg_write(lynx, LINK_INT_ENABLE, LINK_INT_PHY_TIMEOUT
1471                   | LINK_INT_PHY_REG_RCVD  | LINK_INT_PHY_BUSRESET
1472                   | LINK_INT_ISO_STUCK     | LINK_INT_ASYNC_STUCK 
1473                   | LINK_INT_SENT_REJECT   | LINK_INT_TX_INVALID_TC
1474                   | LINK_INT_GRF_OVERFLOW  | LINK_INT_ITF_UNDERFLOW
1475                   | LINK_INT_ATF_UNDERFLOW);
1476         
1477         reg_write(lynx, DMA_WORD0_CMP_VALUE(CHANNEL_ASYNC_RCV), 0);
1478         reg_write(lynx, DMA_WORD0_CMP_ENABLE(CHANNEL_ASYNC_RCV), 0xa<<4);
1479         reg_write(lynx, DMA_WORD1_CMP_VALUE(CHANNEL_ASYNC_RCV), 0);
1480         reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ASYNC_RCV),
1481                   DMA_WORD1_CMP_MATCH_LOCAL_NODE | DMA_WORD1_CMP_MATCH_BROADCAST
1482                   | DMA_WORD1_CMP_MATCH_EXACT    | DMA_WORD1_CMP_MATCH_BUS_BCAST
1483                   | DMA_WORD1_CMP_ENABLE_SELF_ID | DMA_WORD1_CMP_ENABLE_MASTER);
1484
1485         run_pcl(lynx, lynx->rcv_pcl_start, CHANNEL_ASYNC_RCV);
1486
1487         reg_write(lynx, DMA_WORD0_CMP_VALUE(CHANNEL_ISO_RCV), 0);
1488         reg_write(lynx, DMA_WORD0_CMP_ENABLE(CHANNEL_ISO_RCV), 0x9<<4);
1489         reg_write(lynx, DMA_WORD1_CMP_VALUE(CHANNEL_ISO_RCV), 0);
1490         reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV), 0);
1491
1492         run_sub_pcl(lynx, lynx->iso_rcv.pcl_start, 0, CHANNEL_ISO_RCV);
1493
1494         reg_write(lynx, LINK_CONTROL, LINK_CONTROL_RCV_CMP_VALID
1495                   | LINK_CONTROL_TX_ISO_EN   | LINK_CONTROL_RX_ISO_EN
1496                   | LINK_CONTROL_TX_ASYNC_EN | LINK_CONTROL_RX_ASYNC_EN
1497                   | LINK_CONTROL_RESET_TX    | LINK_CONTROL_RESET_RX);
1498
1499         if (!lynx->phyic.reg_1394a) {
1500                 /* attempt to enable contender bit -FIXME- would this work
1501                  * elsewhere? */
1502                 reg_set_bits(lynx, GPIO_CTRL_A, 0x1);
1503                 reg_write(lynx, GPIO_DATA_BASE + 0x3c, 0x1); 
1504         } else {
1505                 /* set the contender bit in the extended PHY register
1506                  * set. (Should check that bits 5..7 (=0xE0) are set
1507                  * in register 2?)
1508                  */
1509                 i = get_phy_reg(lynx, 4);
1510                 if (i != -1) set_phy_reg(lynx, 4, i | 0x40);
1511         }
1512
1513         hpsb_add_host(host);
1514         lynx->state = is_host;
1515
1516         return 0;
1517 #undef FAIL
1518 }
1519
1520
1521
1522 static size_t get_lynx_rom(struct hpsb_host *host, const quadlet_t **ptr)
1523 {
1524         *ptr = lynx_csr_rom;
1525         return sizeof(lynx_csr_rom);
1526 }
1527
1528 static struct pci_device_id pci_table[] __devinitdata = {
1529         {
1530                 vendor:     PCI_VENDOR_ID_TI,
1531                 device:     PCI_DEVICE_ID_TI_PCILYNX,
1532                 subvendor:  PCI_ANY_ID,
1533                 subdevice:  PCI_ANY_ID,
1534         },
1535         { }                     /* Terminating entry */
1536 };
1537
1538 static struct pci_driver lynx_pci_driver = {
1539         name:      PCILYNX_DRIVER_NAME,
1540         id_table:  pci_table,
1541         probe:     add_card,
1542         remove:    __devexit_p(remove_card),
1543 };
1544
1545 static struct hpsb_host_operations lynx_ops = {
1546         get_rom:          get_lynx_rom,
1547         transmit_packet:  lynx_transmit,
1548         devctl:           lynx_devctl,
1549 };
1550
1551 MODULE_AUTHOR("Andreas E. Bombe <andreas.bombe@munich.netsurf.de>");
1552 MODULE_DESCRIPTION("driver for Texas Instruments PCI Lynx IEEE-1394 controller");
1553 MODULE_LICENSE("GPL");
1554 MODULE_SUPPORTED_DEVICE("pcilynx");
1555 MODULE_DEVICE_TABLE(pci, pci_table);
1556
1557 static int __init pcilynx_init(void)
1558 {
1559         int ret;
1560
1561 #ifdef CONFIG_IEEE1394_PCILYNX_PORTS
1562         if (register_chrdev(PCILYNX_MAJOR, PCILYNX_DRIVER_NAME, &aux_ops)) {
1563                 PRINT_G(KERN_ERR, "allocation of char major number %d failed",
1564                         PCILYNX_MAJOR);
1565                 return -EBUSY;
1566         }
1567 #endif
1568
1569         lynx_driver = hpsb_register_lowlevel(&lynx_ops, PCILYNX_DRIVER_NAME);
1570         if (!lynx_driver) {
1571                 ret = -ENOMEM;
1572                 goto free_char_dev;
1573         }
1574
1575         ret = pci_module_init(&lynx_pci_driver);
1576         if (ret < 0) {
1577                 PRINT_G(KERN_ERR, "PCI module init failed");
1578                 goto unregister_lowlevel;
1579         }
1580
1581         return 0;
1582
1583  unregister_lowlevel:
1584         hpsb_unregister_lowlevel(lynx_driver);
1585  free_char_dev:
1586 #ifdef CONFIG_IEEE1394_PCILYNX_PORTS
1587         unregister_chrdev(PCILYNX_MAJOR, PCILYNX_DRIVER_NAME);
1588 #endif
1589
1590         return ret;
1591 }
1592
1593 static void __exit pcilynx_cleanup(void)
1594 {
1595         pci_unregister_driver(&lynx_pci_driver);
1596         hpsb_unregister_lowlevel(lynx_driver);
1597
1598 #ifdef CONFIG_IEEE1394_PCILYNX_PORTS
1599         unregister_chrdev(PCILYNX_MAJOR, PCILYNX_DRIVER_NAME);
1600 #endif
1601 }
1602
1603
1604 module_init(pcilynx_init);
1605 module_exit(pcilynx_cleanup);