From 997a8965db8417266bea3fbdcfa3e5655a1b52fa Mon Sep 17 00:00:00 2001
From: John Crispin <[email protected]>
Date: Tue, 9 Sep 2014 23:12:15 +0200
Subject: [PATCH 18/36] MTD: nand: lots of xrx200 fixes

Signed-off-by: John Crispin <[email protected]>
---
 drivers/mtd/nand/raw/xway_nand.c | 63 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 63 insertions(+)

--- a/drivers/mtd/nand/raw/xway_nand.c
+++ b/drivers/mtd/nand/raw/xway_nand.c
@@ -61,6 +61,24 @@
 #define NAND_CON_CSMUX		(1 << 1)
 #define NAND_CON_NANDM		1
 
+#define DANUBE_PCI_REG32( addr )	(*(volatile u32 *)(addr))
+#define PCI_CR_PR_OFFSET	(KSEG1+0x1E105400)
+#define PCI_CR_PC_ARB		(PCI_CR_PR_OFFSET + 0x0080)
+
+/*
+ * req_mask provides a mechanism to prevent interference between
+ * nand and pci (probably only relevant for the BT Home Hub 2B).
+ * Setting it causes the corresponding pci req pins to be masked
+ * during nand access, and also moves ebu locking from the read/write
+ * functions to the chip select function to ensure that the whole
+ * operation runs with interrupts disabled.
+ * In addition it switches on some extra waiting in xway_cmd_ctrl().
+ * This seems to be necessary if the ebu_cs1 pin has open-drain disabled,
+ * which in turn seems to be necessary for the nor chip to be recognised
+ * reliably, on a board (Home Hub 2B again) which has both nor and nand.
+ */
+static __be32 req_mask = 0;
+
 struct xway_nand_data {
 	struct nand_controller	controller;
 	struct nand_chip	chip;
@@ -92,10 +110,22 @@ static void xway_select_chip(struct nand
 	case -1:
 		ltq_ebu_w32_mask(NAND_CON_CE, 0, EBU_NAND_CON);
 		ltq_ebu_w32_mask(NAND_CON_NANDM, 0, EBU_NAND_CON);
+
+		if (req_mask) {
+			/* Unmask all external PCI request */
+			DANUBE_PCI_REG32(PCI_CR_PC_ARB) &= ~(req_mask << 16);
+		}
+
 		spin_unlock_irqrestore(&ebu_lock, data->csflags);
 		break;
 	case 0:
 		spin_lock_irqsave(&ebu_lock, data->csflags);
+
+		if (req_mask) {
+			/* Mask all external PCI request */
+			DANUBE_PCI_REG32(PCI_CR_PC_ARB) |= (req_mask << 16);
+		}
+
 		ltq_ebu_w32_mask(0, NAND_CON_NANDM, EBU_NAND_CON);
 		ltq_ebu_w32_mask(0, NAND_CON_CE, EBU_NAND_CON);
 		break;
@@ -108,6 +138,11 @@ static void xway_cmd_ctrl(struct nand_ch
 {
 	struct mtd_info *mtd = nand_to_mtd(chip);
 
+	if (req_mask) {
+		if (cmd != NAND_CMD_STATUS)
+			ltq_ebu_w32(0, EBU_NAND_WAIT); /* Clear nand ready */
+	}
+
 	if (cmd == NAND_CMD_NONE)
 		return;
 
@@ -118,6 +153,24 @@ static void xway_cmd_ctrl(struct nand_ch
 	while ((ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_WR_C) == 0)
 		;
+
+	if (req_mask) {
+		/*
+		 * program and erase have their own busy handlers
+		 * status and sequential in needs no delay
+		 */
+		switch (cmd) {
+		case NAND_CMD_ERASE1:
+		case NAND_CMD_SEQIN:
+		case NAND_CMD_STATUS:
+		case NAND_CMD_READID:
+			return;
+		}
+
+		/* wait until command is processed */
+		while ((ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_RD) == 0)
+			;
+	}
 }
 
 static int xway_dev_ready(struct nand_chip *chip)
@@ -170,6 +223,7 @@ static int xway_nand_probe(struct platfo
 	int err;
 	u32 cs;
 	u32 cs_flag = 0;
+	const __be32 *req_mask_ptr;
 
 	/* Allocate memory for the device structure (and zero it) */
 	data = devm_kzalloc(&pdev->dev, sizeof(struct xway_nand_data),
@@ -206,6 +260,15 @@ static int xway_nand_probe(struct platfo
 	if (!err && cs == 1)
 		cs_flag = NAND_CON_IN_CS1 | NAND_CON_OUT_CS1;
 
+	req_mask_ptr = of_get_property(pdev->dev.of_node,
+					"req-mask", NULL);
+
+	/*
+	 * Load the PCI req lines to mask from the device tree. If the
+	 * property is not present, setting req_mask to 0 disables masking.
+	 */
+	req_mask = (req_mask_ptr ? *req_mask_ptr : 0);
+
 	/* setup the EBU to run in NAND mode on our base addr */
 	ltq_ebu_w32(CPHYSADDR(data->nandaddr)
 		    | ADDSEL1_MASK(3) | ADDSEL1_REGEN, EBU_ADDSEL1);