/* Reconstructed from an LXR web scrape of drivers/mtd/chips/amd_flash.c
 * (non-source boilerplate removed). See the file header comment below. */
00001 /* 00002 * MTD map driver for AMD compatible flash chips (non-CFI) 00003 * 00004 * Author: Jonas Holmberg <jonas.holmberg@axis.com> 00005 * 00006 * $Id: amd_flash.c,v 1.19 2003/01/24 13:30:11 dwmw2 Exp $ 00007 * 00008 * Copyright (c) 2001 Axis Communications AB 00009 * 00010 * This file is under GPL. 00011 * 00012 */ 00013 00014 #include <linux/module.h> 00015 #include <linux/types.h> 00016 #include <linux/kernel.h> 00017 #include <linux/sched.h> 00018 #include <linux/errno.h> 00019 #include <linux/slab.h> 00020 #include <linux/delay.h> 00021 #include <linux/interrupt.h> 00022 #include <linux/mtd/map.h> 00023 #include <linux/mtd/mtd.h> 00024 #include <linux/mtd/flashchip.h> 00025 00026 /* There's no limit. It exists only to avoid realloc. */ 00027 #define MAX_AMD_CHIPS 8 00028 00029 #define DEVICE_TYPE_X8 (8 / 8) 00030 #define DEVICE_TYPE_X16 (16 / 8) 00031 #define DEVICE_TYPE_X32 (32 / 8) 00032 00033 /* Addresses */ 00034 #define ADDR_MANUFACTURER 0x0000 00035 #define ADDR_DEVICE_ID 0x0001 00036 #define ADDR_SECTOR_LOCK 0x0002 00037 #define ADDR_HANDSHAKE 0x0003 00038 #define ADDR_UNLOCK_1 0x0555 00039 #define ADDR_UNLOCK_2 0x02AA 00040 00041 /* Commands */ 00042 #define CMD_UNLOCK_DATA_1 0x00AA 00043 #define CMD_UNLOCK_DATA_2 0x0055 00044 #define CMD_MANUFACTURER_UNLOCK_DATA 0x0090 00045 #define CMD_UNLOCK_BYPASS_MODE 0x0020 00046 #define CMD_PROGRAM_UNLOCK_DATA 0x00A0 00047 #define CMD_RESET_DATA 0x00F0 00048 #define CMD_SECTOR_ERASE_UNLOCK_DATA 0x0080 00049 #define CMD_SECTOR_ERASE_UNLOCK_DATA_2 0x0030 00050 00051 #define CMD_UNLOCK_SECTOR 0x0060 00052 00053 /* Manufacturers */ 00054 #define MANUFACTURER_AMD 0x0001 00055 #define MANUFACTURER_ATMEL 0x001F 00056 #define MANUFACTURER_FUJITSU 0x0004 00057 #define MANUFACTURER_ST 0x0020 00058 #define MANUFACTURER_SST 0x00BF 00059 #define MANUFACTURER_TOSHIBA 0x0098 00060 00061 /* AMD */ 00062 #define AM29F800BB 0x2258 00063 #define AM29F800BT 0x22D6 00064 #define AM29LV800BB 0x225B 00065 #define AM29LV800BT 
0x22DA 00066 #define AM29LV160DT 0x22C4 00067 #define AM29LV160DB 0x2249 00068 #define AM29BDS323D 0x22D1 00069 #define AM29BDS643D 0x227E 00070 00071 /* Atmel */ 00072 #define AT49xV16x 0x00C0 00073 #define AT49xV16xT 0x00C2 00074 00075 /* Fujitsu */ 00076 #define MBM29LV160TE 0x22C4 00077 #define MBM29LV160BE 0x2249 00078 #define MBM29LV800BB 0x225B 00079 00080 /* ST - www.st.com */ 00081 #define M29W800T 0x00D7 00082 #define M29W160DT 0x22C4 00083 #define M29W160DB 0x2249 00084 00085 /* SST */ 00086 #define SST39LF800 0x2781 00087 #define SST39LF160 0x2782 00088 00089 /* Toshiba */ 00090 #define TC58FVT160 0x00C2 00091 #define TC58FVB160 0x0043 00092 00093 #define D6_MASK 0x40 00094 00095 struct amd_flash_private { 00096 int device_type; 00097 int interleave; 00098 int numchips; 00099 unsigned long chipshift; 00100 // const char *im_name; 00101 struct flchip chips[0]; 00102 }; 00103 00104 struct amd_flash_info { 00105 const __u16 mfr_id; 00106 const __u16 dev_id; 00107 const char *name; 00108 const u_long size; 00109 const int numeraseregions; 00110 const struct mtd_erase_region_info regions[4]; 00111 }; 00112 00113 00114 00115 static int amd_flash_read(struct mtd_info *, loff_t, size_t, size_t *, 00116 u_char *); 00117 static int amd_flash_write(struct mtd_info *, loff_t, size_t, size_t *, 00118 const u_char *); 00119 static int amd_flash_erase(struct mtd_info *, struct erase_info *); 00120 static void amd_flash_sync(struct mtd_info *); 00121 static int amd_flash_suspend(struct mtd_info *); 00122 static void amd_flash_resume(struct mtd_info *); 00123 static void amd_flash_destroy(struct mtd_info *); 00124 static struct mtd_info *amd_flash_probe(struct map_info *map); 00125 00126 00127 static struct mtd_chip_driver amd_flash_chipdrv = { 00128 probe: amd_flash_probe, 00129 destroy: amd_flash_destroy, 00130 name: "amd_flash", 00131 module: THIS_MODULE 00132 }; 00133 00134 00135 00136 static const char im_name[] = "amd_flash"; 00137 00138 00139 00140 static inline 
__u32 wide_read(struct map_info *map, __u32 addr) 00141 { 00142 if (map->buswidth == 1) { 00143 return map->read8(map, addr); 00144 } else if (map->buswidth == 2) { 00145 return map->read16(map, addr); 00146 } else if (map->buswidth == 4) { 00147 return map->read32(map, addr); 00148 } 00149 00150 return 0; 00151 } 00152 00153 static inline void wide_write(struct map_info *map, __u32 val, __u32 addr) 00154 { 00155 if (map->buswidth == 1) { 00156 map->write8(map, val, addr); 00157 } else if (map->buswidth == 2) { 00158 map->write16(map, val, addr); 00159 } else if (map->buswidth == 4) { 00160 map->write32(map, val, addr); 00161 } 00162 } 00163 00164 static inline __u32 make_cmd(struct map_info *map, __u32 cmd) 00165 { 00166 const struct amd_flash_private *private = map->fldrv_priv; 00167 if ((private->interleave == 2) && 00168 (private->device_type == DEVICE_TYPE_X16)) { 00169 cmd |= (cmd << 16); 00170 } 00171 00172 return cmd; 00173 } 00174 00175 static inline void send_unlock(struct map_info *map, unsigned long base) 00176 { 00177 wide_write(map, (CMD_UNLOCK_DATA_1 << 16) | CMD_UNLOCK_DATA_1, 00178 base + (map->buswidth * ADDR_UNLOCK_1)); 00179 wide_write(map, (CMD_UNLOCK_DATA_2 << 16) | CMD_UNLOCK_DATA_2, 00180 base + (map->buswidth * ADDR_UNLOCK_2)); 00181 } 00182 00183 static inline void send_cmd(struct map_info *map, unsigned long base, __u32 cmd) 00184 { 00185 send_unlock(map, base); 00186 wide_write(map, make_cmd(map, cmd), 00187 base + (map->buswidth * ADDR_UNLOCK_1)); 00188 } 00189 00190 static inline void send_cmd_to_addr(struct map_info *map, unsigned long base, 00191 __u32 cmd, unsigned long addr) 00192 { 00193 send_unlock(map, base); 00194 wide_write(map, make_cmd(map, cmd), addr); 00195 } 00196 00197 static inline int flash_is_busy(struct map_info *map, unsigned long addr, 00198 int interleave) 00199 { 00200 00201 if ((interleave == 2) && (map->buswidth == 4)) { 00202 __u32 read1, read2; 00203 00204 read1 = wide_read(map, addr); 00205 read2 = 
wide_read(map, addr); 00206 00207 return (((read1 >> 16) & D6_MASK) != 00208 ((read2 >> 16) & D6_MASK)) || 00209 (((read1 & 0xffff) & D6_MASK) != 00210 ((read2 & 0xffff) & D6_MASK)); 00211 } 00212 00213 return ((wide_read(map, addr) & D6_MASK) != 00214 (wide_read(map, addr) & D6_MASK)); 00215 } 00216 00217 static inline void unlock_sector(struct map_info *map, unsigned long sect_addr, 00218 int unlock) 00219 { 00220 /* Sector lock address. A6 = 1 for unlock, A6 = 0 for lock */ 00221 int SLA = unlock ? 00222 (sect_addr | (0x40 * map->buswidth)) : 00223 (sect_addr & ~(0x40 * map->buswidth)) ; 00224 00225 __u32 cmd = make_cmd(map, CMD_UNLOCK_SECTOR); 00226 00227 wide_write(map, make_cmd(map, CMD_RESET_DATA), 0); 00228 wide_write(map, cmd, SLA); /* 1st cycle: write cmd to any address */ 00229 wide_write(map, cmd, SLA); /* 2nd cycle: write cmd to any address */ 00230 wide_write(map, cmd, SLA); /* 3rd cycle: write cmd to SLA */ 00231 } 00232 00233 static inline int is_sector_locked(struct map_info *map, 00234 unsigned long sect_addr) 00235 { 00236 int status; 00237 00238 wide_write(map, CMD_RESET_DATA, 0); 00239 send_cmd(map, sect_addr, CMD_MANUFACTURER_UNLOCK_DATA); 00240 00241 /* status is 0x0000 for unlocked and 0x0001 for locked */ 00242 status = wide_read(map, sect_addr + (map->buswidth * ADDR_SECTOR_LOCK)); 00243 wide_write(map, CMD_RESET_DATA, 0); 00244 return status; 00245 } 00246 00247 static int amd_flash_do_unlock(struct mtd_info *mtd, loff_t ofs, size_t len, 00248 int is_unlock) 00249 { 00250 struct map_info *map; 00251 struct mtd_erase_region_info *merip; 00252 int eraseoffset, erasesize, eraseblocks; 00253 int i; 00254 int retval = 0; 00255 int lock_status; 00256 00257 map = mtd->priv; 00258 00259 /* Pass the whole chip through sector by sector and check for each 00260 sector if the sector and the given interval overlap */ 00261 for(i = 0; i < mtd->numeraseregions; i++) { 00262 merip = &mtd->eraseregions[i]; 00263 00264 eraseoffset = merip->offset; 00265 
erasesize = merip->erasesize; 00266 eraseblocks = merip->numblocks; 00267 00268 if (ofs > eraseoffset + erasesize) 00269 continue; 00270 00271 while (eraseblocks > 0) { 00272 if (ofs < eraseoffset + erasesize && ofs + len > eraseoffset) { 00273 unlock_sector(map, eraseoffset, is_unlock); 00274 00275 lock_status = is_sector_locked(map, eraseoffset); 00276 00277 if (is_unlock && lock_status) { 00278 printk("Cannot unlock sector at address %x length %xx\n", 00279 eraseoffset, merip->erasesize); 00280 retval = -1; 00281 } else if (!is_unlock && !lock_status) { 00282 printk("Cannot lock sector at address %x length %x\n", 00283 eraseoffset, merip->erasesize); 00284 retval = -1; 00285 } 00286 } 00287 eraseoffset += erasesize; 00288 eraseblocks --; 00289 } 00290 } 00291 return retval; 00292 } 00293 00294 static int amd_flash_unlock(struct mtd_info *mtd, loff_t ofs, size_t len) 00295 { 00296 return amd_flash_do_unlock(mtd, ofs, len, 1); 00297 } 00298 00299 static int amd_flash_lock(struct mtd_info *mtd, loff_t ofs, size_t len) 00300 { 00301 return amd_flash_do_unlock(mtd, ofs, len, 0); 00302 } 00303 00304 00305 /* 00306 * Reads JEDEC manufacturer ID and device ID and returns the index of the first 00307 * matching table entry (-1 if not found or alias for already found chip). 00308 */ 00309 static int probe_new_chip(struct mtd_info *mtd, __u32 base, 00310 struct flchip *chips, 00311 struct amd_flash_private *private, 00312 const struct amd_flash_info *table, int table_size) 00313 { 00314 __u32 mfr_id; 00315 __u32 dev_id; 00316 struct map_info *map = mtd->priv; 00317 struct amd_flash_private temp; 00318 int i; 00319 00320 temp.device_type = DEVICE_TYPE_X16; // Assume X16 (FIXME) 00321 temp.interleave = 2; 00322 map->fldrv_priv = &temp; 00323 00324 /* Enter autoselect mode. 
*/ 00325 send_cmd(map, base, CMD_RESET_DATA); 00326 send_cmd(map, base, CMD_MANUFACTURER_UNLOCK_DATA); 00327 00328 mfr_id = wide_read(map, base + (map->buswidth * ADDR_MANUFACTURER)); 00329 dev_id = wide_read(map, base + (map->buswidth * ADDR_DEVICE_ID)); 00330 00331 if ((map->buswidth == 4) && ((mfr_id >> 16) == (mfr_id & 0xffff)) && 00332 ((dev_id >> 16) == (dev_id & 0xffff))) { 00333 mfr_id &= 0xffff; 00334 dev_id &= 0xffff; 00335 } else { 00336 temp.interleave = 1; 00337 } 00338 00339 for (i = 0; i < table_size; i++) { 00340 if ((mfr_id == table[i].mfr_id) && 00341 (dev_id == table[i].dev_id)) { 00342 if (chips) { 00343 int j; 00344 00345 /* Is this an alias for an already found chip? 00346 * In that case that chip should be in 00347 * autoselect mode now. 00348 */ 00349 for (j = 0; j < private->numchips; j++) { 00350 __u32 mfr_id_other; 00351 __u32 dev_id_other; 00352 00353 mfr_id_other = 00354 wide_read(map, chips[j].start + 00355 (map->buswidth * 00356 ADDR_MANUFACTURER 00357 )); 00358 dev_id_other = 00359 wide_read(map, chips[j].start + 00360 (map->buswidth * 00361 ADDR_DEVICE_ID)); 00362 if (temp.interleave == 2) { 00363 mfr_id_other &= 0xffff; 00364 dev_id_other &= 0xffff; 00365 } 00366 if ((mfr_id_other == mfr_id) && 00367 (dev_id_other == dev_id)) { 00368 00369 /* Exit autoselect mode. */ 00370 send_cmd(map, base, 00371 CMD_RESET_DATA); 00372 00373 return -1; 00374 } 00375 } 00376 00377 if (private->numchips == MAX_AMD_CHIPS) { 00378 printk(KERN_WARNING 00379 "%s: Too many flash chips " 00380 "detected. 
Increase " 00381 "MAX_AMD_CHIPS from %d.\n", 00382 map->name, MAX_AMD_CHIPS); 00383 00384 return -1; 00385 } 00386 00387 chips[private->numchips].start = base; 00388 chips[private->numchips].state = FL_READY; 00389 chips[private->numchips].mutex = 00390 &chips[private->numchips]._spinlock; 00391 private->numchips++; 00392 } 00393 00394 printk("%s: Found %d x %ldMiB %s at 0x%x\n", map->name, 00395 temp.interleave, (table[i].size)/(1024*1024), 00396 table[i].name, base); 00397 00398 mtd->size += table[i].size * temp.interleave; 00399 mtd->numeraseregions += table[i].numeraseregions; 00400 00401 break; 00402 } 00403 } 00404 00405 /* Exit autoselect mode. */ 00406 send_cmd(map, base, CMD_RESET_DATA); 00407 00408 if (i == table_size) { 00409 printk(KERN_DEBUG "%s: unknown flash device at 0x%x, " 00410 "mfr id 0x%x, dev id 0x%x\n", map->name, 00411 base, mfr_id, dev_id); 00412 map->fldrv_priv = NULL; 00413 00414 return -1; 00415 } 00416 00417 private->device_type = temp.device_type; 00418 private->interleave = temp.interleave; 00419 00420 return i; 00421 } 00422 00423 00424 00425 static struct mtd_info *amd_flash_probe(struct map_info *map) 00426 { 00427 /* Keep this table on the stack so that it gets deallocated after the 00428 * probe is done. 
00429 */ 00430 const struct amd_flash_info table[] = { 00431 { 00432 mfr_id: MANUFACTURER_AMD, 00433 dev_id: AM29LV160DT, 00434 name: "AMD AM29LV160DT", 00435 size: 0x00200000, 00436 numeraseregions: 4, 00437 regions: { 00438 { offset: 0x000000, erasesize: 0x10000, numblocks: 31 }, 00439 { offset: 0x1F0000, erasesize: 0x08000, numblocks: 1 }, 00440 { offset: 0x1F8000, erasesize: 0x02000, numblocks: 2 }, 00441 { offset: 0x1FC000, erasesize: 0x04000, numblocks: 1 } 00442 } 00443 }, { 00444 mfr_id: MANUFACTURER_AMD, 00445 dev_id: AM29LV160DB, 00446 name: "AMD AM29LV160DB", 00447 size: 0x00200000, 00448 numeraseregions: 4, 00449 regions: { 00450 { offset: 0x000000, erasesize: 0x04000, numblocks: 1 }, 00451 { offset: 0x004000, erasesize: 0x02000, numblocks: 2 }, 00452 { offset: 0x008000, erasesize: 0x08000, numblocks: 1 }, 00453 { offset: 0x010000, erasesize: 0x10000, numblocks: 31 } 00454 } 00455 }, { 00456 mfr_id: MANUFACTURER_TOSHIBA, 00457 dev_id: TC58FVT160, 00458 name: "Toshiba TC58FVT160", 00459 size: 0x00200000, 00460 numeraseregions: 4, 00461 regions: { 00462 { offset: 0x000000, erasesize: 0x10000, numblocks: 31 }, 00463 { offset: 0x1F0000, erasesize: 0x08000, numblocks: 1 }, 00464 { offset: 0x1F8000, erasesize: 0x02000, numblocks: 2 }, 00465 { offset: 0x1FC000, erasesize: 0x04000, numblocks: 1 } 00466 } 00467 }, { 00468 mfr_id: MANUFACTURER_FUJITSU, 00469 dev_id: MBM29LV160TE, 00470 name: "Fujitsu MBM29LV160TE", 00471 size: 0x00200000, 00472 numeraseregions: 4, 00473 regions: { 00474 { offset: 0x000000, erasesize: 0x10000, numblocks: 31 }, 00475 { offset: 0x1F0000, erasesize: 0x08000, numblocks: 1 }, 00476 { offset: 0x1F8000, erasesize: 0x02000, numblocks: 2 }, 00477 { offset: 0x1FC000, erasesize: 0x04000, numblocks: 1 } 00478 } 00479 }, { 00480 mfr_id: MANUFACTURER_TOSHIBA, 00481 dev_id: TC58FVB160, 00482 name: "Toshiba TC58FVB160", 00483 size: 0x00200000, 00484 numeraseregions: 4, 00485 regions: { 00486 { offset: 0x000000, erasesize: 0x04000, numblocks: 1 }, 
00487 { offset: 0x004000, erasesize: 0x02000, numblocks: 2 }, 00488 { offset: 0x008000, erasesize: 0x08000, numblocks: 1 }, 00489 { offset: 0x010000, erasesize: 0x10000, numblocks: 31 } 00490 } 00491 }, { 00492 mfr_id: MANUFACTURER_FUJITSU, 00493 dev_id: MBM29LV160BE, 00494 name: "Fujitsu MBM29LV160BE", 00495 size: 0x00200000, 00496 numeraseregions: 4, 00497 regions: { 00498 { offset: 0x000000, erasesize: 0x04000, numblocks: 1 }, 00499 { offset: 0x004000, erasesize: 0x02000, numblocks: 2 }, 00500 { offset: 0x008000, erasesize: 0x08000, numblocks: 1 }, 00501 { offset: 0x010000, erasesize: 0x10000, numblocks: 31 } 00502 } 00503 }, { 00504 mfr_id: MANUFACTURER_AMD, 00505 dev_id: AM29LV800BB, 00506 name: "AMD AM29LV800BB", 00507 size: 0x00100000, 00508 numeraseregions: 4, 00509 regions: { 00510 { offset: 0x000000, erasesize: 0x04000, numblocks: 1 }, 00511 { offset: 0x004000, erasesize: 0x02000, numblocks: 2 }, 00512 { offset: 0x008000, erasesize: 0x08000, numblocks: 1 }, 00513 { offset: 0x010000, erasesize: 0x10000, numblocks: 15 } 00514 } 00515 }, { 00516 mfr_id: MANUFACTURER_AMD, 00517 dev_id: AM29F800BB, 00518 name: "AMD AM29F800BB", 00519 size: 0x00100000, 00520 numeraseregions: 4, 00521 regions: { 00522 { offset: 0x000000, erasesize: 0x04000, numblocks: 1 }, 00523 { offset: 0x004000, erasesize: 0x02000, numblocks: 2 }, 00524 { offset: 0x008000, erasesize: 0x08000, numblocks: 1 }, 00525 { offset: 0x010000, erasesize: 0x10000, numblocks: 15 } 00526 } 00527 }, { 00528 mfr_id: MANUFACTURER_AMD, 00529 dev_id: AM29LV800BT, 00530 name: "AMD AM29LV800BT", 00531 size: 0x00100000, 00532 numeraseregions: 4, 00533 regions: { 00534 { offset: 0x000000, erasesize: 0x10000, numblocks: 15 }, 00535 { offset: 0x0F0000, erasesize: 0x08000, numblocks: 1 }, 00536 { offset: 0x0F8000, erasesize: 0x02000, numblocks: 2 }, 00537 { offset: 0x0FC000, erasesize: 0x04000, numblocks: 1 } 00538 } 00539 }, { 00540 mfr_id: MANUFACTURER_AMD, 00541 dev_id: AM29F800BT, 00542 name: "AMD AM29F800BT", 
00543 size: 0x00100000, 00544 numeraseregions: 4, 00545 regions: { 00546 { offset: 0x000000, erasesize: 0x10000, numblocks: 15 }, 00547 { offset: 0x0F0000, erasesize: 0x08000, numblocks: 1 }, 00548 { offset: 0x0F8000, erasesize: 0x02000, numblocks: 2 }, 00549 { offset: 0x0FC000, erasesize: 0x04000, numblocks: 1 } 00550 } 00551 }, { 00552 mfr_id: MANUFACTURER_AMD, 00553 dev_id: AM29LV800BB, 00554 name: "AMD AM29LV800BB", 00555 size: 0x00100000, 00556 numeraseregions: 4, 00557 regions: { 00558 { offset: 0x000000, erasesize: 0x10000, numblocks: 15 }, 00559 { offset: 0x0F0000, erasesize: 0x08000, numblocks: 1 }, 00560 { offset: 0x0F8000, erasesize: 0x02000, numblocks: 2 }, 00561 { offset: 0x0FC000, erasesize: 0x04000, numblocks: 1 } 00562 } 00563 }, { 00564 mfr_id: MANUFACTURER_FUJITSU, 00565 dev_id: MBM29LV800BB, 00566 name: "Fujitsu MBM29LV800BB", 00567 size: 0x00100000, 00568 numeraseregions: 4, 00569 regions: { 00570 { offset: 0x000000, erasesize: 0x04000, numblocks: 1 }, 00571 { offset: 0x004000, erasesize: 0x02000, numblocks: 2 }, 00572 { offset: 0x008000, erasesize: 0x08000, numblocks: 1 }, 00573 { offset: 0x010000, erasesize: 0x10000, numblocks: 15 } 00574 } 00575 }, { 00576 mfr_id: MANUFACTURER_ST, 00577 dev_id: M29W800T, 00578 name: "ST M29W800T", 00579 size: 0x00100000, 00580 numeraseregions: 4, 00581 regions: { 00582 { offset: 0x000000, erasesize: 0x10000, numblocks: 15 }, 00583 { offset: 0x0F0000, erasesize: 0x08000, numblocks: 1 }, 00584 { offset: 0x0F8000, erasesize: 0x02000, numblocks: 2 }, 00585 { offset: 0x0FC000, erasesize: 0x04000, numblocks: 1 } 00586 } 00587 }, { 00588 mfr_id: MANUFACTURER_ST, 00589 dev_id: M29W160DT, 00590 name: "ST M29W160DT", 00591 size: 0x00200000, 00592 numeraseregions: 4, 00593 regions: { 00594 { offset: 0x000000, erasesize: 0x10000, numblocks: 31 }, 00595 { offset: 0x1F0000, erasesize: 0x08000, numblocks: 1 }, 00596 { offset: 0x1F8000, erasesize: 0x02000, numblocks: 2 }, 00597 { offset: 0x1FC000, erasesize: 0x04000, 
numblocks: 1 } 00598 } 00599 }, { 00600 mfr_id: MANUFACTURER_ST, 00601 dev_id: M29W160DB, 00602 name: "ST M29W160DB", 00603 size: 0x00200000, 00604 numeraseregions: 4, 00605 regions: { 00606 { offset: 0x000000, erasesize: 0x04000, numblocks: 1 }, 00607 { offset: 0x004000, erasesize: 0x02000, numblocks: 2 }, 00608 { offset: 0x008000, erasesize: 0x08000, numblocks: 1 }, 00609 { offset: 0x010000, erasesize: 0x10000, numblocks: 31 } 00610 } 00611 }, { 00612 mfr_id: MANUFACTURER_AMD, 00613 dev_id: AM29BDS323D, 00614 name: "AMD AM29BDS323D", 00615 size: 0x00400000, 00616 numeraseregions: 3, 00617 regions: { 00618 { offset: 0x000000, erasesize: 0x10000, numblocks: 48 }, 00619 { offset: 0x300000, erasesize: 0x10000, numblocks: 15 }, 00620 { offset: 0x3f0000, erasesize: 0x02000, numblocks: 8 }, 00621 } 00622 }, { 00623 mfr_id: MANUFACTURER_AMD, 00624 dev_id: AM29BDS643D, 00625 name: "AMD AM29BDS643D", 00626 size: 0x00800000, 00627 numeraseregions: 3, 00628 regions: { 00629 { offset: 0x000000, erasesize: 0x10000, numblocks: 96 }, 00630 { offset: 0x600000, erasesize: 0x10000, numblocks: 31 }, 00631 { offset: 0x7f0000, erasesize: 0x02000, numblocks: 8 }, 00632 } 00633 }, { 00634 mfr_id: MANUFACTURER_ATMEL, 00635 dev_id: AT49xV16x, 00636 name: "Atmel AT49xV16x", 00637 size: 0x00200000, 00638 numeraseregions: 2, 00639 regions: { 00640 { offset: 0x000000, erasesize: 0x02000, numblocks: 8 }, 00641 { offset: 0x010000, erasesize: 0x10000, numblocks: 31 } 00642 } 00643 }, { 00644 mfr_id: MANUFACTURER_ATMEL, 00645 dev_id: AT49xV16xT, 00646 name: "Atmel AT49xV16xT", 00647 size: 0x00200000, 00648 numeraseregions: 2, 00649 regions: { 00650 { offset: 0x000000, erasesize: 0x10000, numblocks: 31 }, 00651 { offset: 0x1F0000, erasesize: 0x02000, numblocks: 8 } 00652 } 00653 } 00654 }; 00655 00656 struct mtd_info *mtd; 00657 struct flchip chips[MAX_AMD_CHIPS]; 00658 int table_pos[MAX_AMD_CHIPS]; 00659 struct amd_flash_private temp; 00660 struct amd_flash_private *private; 00661 u_long size; 
00662 unsigned long base; 00663 int i; 00664 int reg_idx; 00665 int offset; 00666 00667 mtd = (struct mtd_info*)kmalloc(sizeof(*mtd), GFP_KERNEL); 00668 if (!mtd) { 00669 printk(KERN_WARNING 00670 "%s: kmalloc failed for info structure\n", map->name); 00671 return NULL; 00672 } 00673 memset(mtd, 0, sizeof(*mtd)); 00674 mtd->priv = map; 00675 00676 memset(&temp, 0, sizeof(temp)); 00677 00678 printk("%s: Probing for AMD compatible flash...\n", map->name); 00679 00680 if ((table_pos[0] = probe_new_chip(mtd, 0, NULL, &temp, table, 00681 sizeof(table)/sizeof(table[0]))) 00682 == -1) { 00683 printk(KERN_WARNING 00684 "%s: Found no AMD compatible device at location zero\n", 00685 map->name); 00686 kfree(mtd); 00687 00688 return NULL; 00689 } 00690 00691 chips[0].start = 0; 00692 chips[0].state = FL_READY; 00693 chips[0].mutex = &chips[0]._spinlock; 00694 temp.numchips = 1; 00695 for (size = mtd->size; size > 1; size >>= 1) { 00696 temp.chipshift++; 00697 } 00698 switch (temp.interleave) { 00699 case 2: 00700 temp.chipshift += 1; 00701 break; 00702 case 4: 00703 temp.chipshift += 2; 00704 break; 00705 } 00706 00707 /* Find out if there are any more chips in the map. 
*/ 00708 for (base = (1 << temp.chipshift); 00709 base < map->size; 00710 base += (1 << temp.chipshift)) { 00711 int numchips = temp.numchips; 00712 table_pos[numchips] = probe_new_chip(mtd, base, chips, 00713 &temp, table, sizeof(table)/sizeof(table[0])); 00714 } 00715 00716 mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info) * 00717 mtd->numeraseregions, GFP_KERNEL); 00718 if (!mtd->eraseregions) { 00719 printk(KERN_WARNING "%s: Failed to allocate " 00720 "memory for MTD erase region info\n", map->name); 00721 kfree(mtd); 00722 map->fldrv_priv = NULL; 00723 return 0; 00724 } 00725 00726 reg_idx = 0; 00727 offset = 0; 00728 for (i = 0; i < temp.numchips; i++) { 00729 int dev_size; 00730 int j; 00731 00732 dev_size = 0; 00733 for (j = 0; j < table[table_pos[i]].numeraseregions; j++) { 00734 mtd->eraseregions[reg_idx].offset = offset + 00735 (table[table_pos[i]].regions[j].offset * 00736 temp.interleave); 00737 mtd->eraseregions[reg_idx].erasesize = 00738 table[table_pos[i]].regions[j].erasesize * 00739 temp.interleave; 00740 mtd->eraseregions[reg_idx].numblocks = 00741 table[table_pos[i]].regions[j].numblocks; 00742 if (mtd->erasesize < 00743 mtd->eraseregions[reg_idx].erasesize) { 00744 mtd->erasesize = 00745 mtd->eraseregions[reg_idx].erasesize; 00746 } 00747 dev_size += mtd->eraseregions[reg_idx].erasesize * 00748 mtd->eraseregions[reg_idx].numblocks; 00749 reg_idx++; 00750 } 00751 offset += dev_size; 00752 } 00753 mtd->type = MTD_NORFLASH; 00754 mtd->flags = MTD_CAP_NORFLASH; 00755 mtd->name = map->name; 00756 mtd->erase = amd_flash_erase; 00757 mtd->read = amd_flash_read; 00758 mtd->write = amd_flash_write; 00759 mtd->sync = amd_flash_sync; 00760 mtd->suspend = amd_flash_suspend; 00761 mtd->resume = amd_flash_resume; 00762 mtd->lock = amd_flash_lock; 00763 mtd->unlock = amd_flash_unlock; 00764 00765 private = kmalloc(sizeof(*private) + (sizeof(struct flchip) * 00766 temp.numchips), GFP_KERNEL); 00767 if (!private) { 00768 printk(KERN_WARNING 00769 
"%s: kmalloc failed for private structure\n", map->name); 00770 kfree(mtd); 00771 map->fldrv_priv = NULL; 00772 return NULL; 00773 } 00774 memcpy(private, &temp, sizeof(temp)); 00775 memcpy(private->chips, chips, 00776 sizeof(struct flchip) * private->numchips); 00777 for (i = 0; i < private->numchips; i++) { 00778 init_waitqueue_head(&private->chips[i].wq); 00779 spin_lock_init(&private->chips[i]._spinlock); 00780 } 00781 00782 map->fldrv_priv = private; 00783 00784 map->fldrv = &amd_flash_chipdrv; 00785 MOD_INC_USE_COUNT; 00786 00787 return mtd; 00788 } 00789 00790 00791 00792 static inline int read_one_chip(struct map_info *map, struct flchip *chip, 00793 loff_t adr, size_t len, u_char *buf) 00794 { 00795 DECLARE_WAITQUEUE(wait, current); 00796 unsigned long timeo = jiffies + HZ; 00797 00798 retry: 00799 spin_lock_bh(chip->mutex); 00800 00801 if (chip->state != FL_READY){ 00802 printk(KERN_INFO "%s: waiting for chip to read, state = %d\n", 00803 map->name, chip->state); 00804 set_current_state(TASK_UNINTERRUPTIBLE); 00805 add_wait_queue(&chip->wq, &wait); 00806 00807 spin_unlock_bh(chip->mutex); 00808 00809 schedule(); 00810 remove_wait_queue(&chip->wq, &wait); 00811 00812 if(signal_pending(current)) { 00813 return -EINTR; 00814 } 00815 00816 timeo = jiffies + HZ; 00817 00818 goto retry; 00819 } 00820 00821 adr += chip->start; 00822 00823 chip->state = FL_READY; 00824 00825 map->copy_from(map, buf, adr, len); 00826 00827 wake_up(&chip->wq); 00828 spin_unlock_bh(chip->mutex); 00829 00830 return 0; 00831 } 00832 00833 00834 00835 static int amd_flash_read(struct mtd_info *mtd, loff_t from, size_t len, 00836 size_t *retlen, u_char *buf) 00837 { 00838 struct map_info *map = mtd->priv; 00839 struct amd_flash_private *private = map->fldrv_priv; 00840 unsigned long ofs; 00841 int chipnum; 00842 int ret = 0; 00843 00844 if ((from + len) > mtd->size) { 00845 printk(KERN_WARNING "%s: read request past end of device " 00846 "(0x%lx)\n", map->name, (unsigned long)from + 
len); 00847 00848 return -EINVAL; 00849 } 00850 00851 /* Offset within the first chip that the first read should start. */ 00852 chipnum = (from >> private->chipshift); 00853 ofs = from - (chipnum << private->chipshift); 00854 00855 *retlen = 0; 00856 00857 while (len) { 00858 unsigned long this_len; 00859 00860 if (chipnum >= private->numchips) { 00861 break; 00862 } 00863 00864 if ((len + ofs - 1) >> private->chipshift) { 00865 this_len = (1 << private->chipshift) - ofs; 00866 } else { 00867 this_len = len; 00868 } 00869 00870 ret = read_one_chip(map, &private->chips[chipnum], ofs, 00871 this_len, buf); 00872 if (ret) { 00873 break; 00874 } 00875 00876 *retlen += this_len; 00877 len -= this_len; 00878 buf += this_len; 00879 00880 ofs = 0; 00881 chipnum++; 00882 } 00883 00884 return ret; 00885 } 00886 00887 00888 00889 static int write_one_word(struct map_info *map, struct flchip *chip, 00890 unsigned long adr, __u32 datum) 00891 { 00892 unsigned long timeo = jiffies + HZ; 00893 struct amd_flash_private *private = map->fldrv_priv; 00894 DECLARE_WAITQUEUE(wait, current); 00895 int ret = 0; 00896 int times_left; 00897 00898 retry: 00899 spin_lock_bh(chip->mutex); 00900 00901 if (chip->state != FL_READY){ 00902 printk("%s: waiting for chip to write, state = %d\n", 00903 map->name, chip->state); 00904 set_current_state(TASK_UNINTERRUPTIBLE); 00905 add_wait_queue(&chip->wq, &wait); 00906 00907 spin_unlock_bh(chip->mutex); 00908 00909 schedule(); 00910 remove_wait_queue(&chip->wq, &wait); 00911 printk(KERN_INFO "%s: woke up to write\n", map->name); 00912 if(signal_pending(current)) 00913 return -EINTR; 00914 00915 timeo = jiffies + HZ; 00916 00917 goto retry; 00918 } 00919 00920 chip->state = FL_WRITING; 00921 00922 adr += chip->start; 00923 ENABLE_VPP(map); 00924 send_cmd(map, chip->start, CMD_PROGRAM_UNLOCK_DATA); 00925 wide_write(map, datum, adr); 00926 00927 times_left = 500000; 00928 while (times_left-- && flash_is_busy(map, adr, private->interleave)) { 00929 if 
(need_resched()) { 00930 spin_unlock_bh(chip->mutex); 00931 schedule(); 00932 spin_lock_bh(chip->mutex); 00933 } 00934 } 00935 00936 if (!times_left) { 00937 printk(KERN_WARNING "%s: write to 0x%lx timed out!\n", 00938 map->name, adr); 00939 ret = -EIO; 00940 } else { 00941 __u32 verify; 00942 if ((verify = wide_read(map, adr)) != datum) { 00943 printk(KERN_WARNING "%s: write to 0x%lx failed. " 00944 "datum = %x, verify = %x\n", 00945 map->name, adr, datum, verify); 00946 ret = -EIO; 00947 } 00948 } 00949 00950 DISABLE_VPP(map); 00951 chip->state = FL_READY; 00952 wake_up(&chip->wq); 00953 spin_unlock_bh(chip->mutex); 00954 00955 return ret; 00956 } 00957 00958 00959 00960 static int amd_flash_write(struct mtd_info *mtd, loff_t to , size_t len, 00961 size_t *retlen, const u_char *buf) 00962 { 00963 struct map_info *map = mtd->priv; 00964 struct amd_flash_private *private = map->fldrv_priv; 00965 int ret = 0; 00966 int chipnum; 00967 unsigned long ofs; 00968 unsigned long chipstart; 00969 00970 *retlen = 0; 00971 if (!len) { 00972 return 0; 00973 } 00974 00975 chipnum = to >> private->chipshift; 00976 ofs = to - (chipnum << private->chipshift); 00977 chipstart = private->chips[chipnum].start; 00978 00979 /* If it's not bus-aligned, do the first byte write. 
*/ 00980 if (ofs & (map->buswidth - 1)) { 00981 unsigned long bus_ofs = ofs & ~(map->buswidth - 1); 00982 int i = ofs - bus_ofs; 00983 int n = 0; 00984 u_char tmp_buf[4]; 00985 __u32 datum; 00986 00987 map->copy_from(map, tmp_buf, 00988 bus_ofs + private->chips[chipnum].start, 00989 map->buswidth); 00990 while (len && i < map->buswidth) 00991 tmp_buf[i++] = buf[n++], len--; 00992 00993 if (map->buswidth == 2) { 00994 datum = *(__u16*)tmp_buf; 00995 } else if (map->buswidth == 4) { 00996 datum = *(__u32*)tmp_buf; 00997 } else { 00998 return -EINVAL; /* should never happen, but be safe */ 00999 } 01000 01001 ret = write_one_word(map, &private->chips[chipnum], bus_ofs, 01002 datum); 01003 if (ret) { 01004 return ret; 01005 } 01006 01007 ofs += n; 01008 buf += n; 01009 (*retlen) += n; 01010 01011 if (ofs >> private->chipshift) { 01012 chipnum++; 01013 ofs = 0; 01014 if (chipnum == private->numchips) { 01015 return 0; 01016 } 01017 } 01018 } 01019 01020 /* We are now aligned, write as much as possible. 
*/ 01021 while(len >= map->buswidth) { 01022 __u32 datum; 01023 01024 if (map->buswidth == 1) { 01025 datum = *(__u8*)buf; 01026 } else if (map->buswidth == 2) { 01027 datum = *(__u16*)buf; 01028 } else if (map->buswidth == 4) { 01029 datum = *(__u32*)buf; 01030 } else { 01031 return -EINVAL; 01032 } 01033 01034 ret = write_one_word(map, &private->chips[chipnum], ofs, datum); 01035 01036 if (ret) { 01037 return ret; 01038 } 01039 01040 ofs += map->buswidth; 01041 buf += map->buswidth; 01042 (*retlen) += map->buswidth; 01043 len -= map->buswidth; 01044 01045 if (ofs >> private->chipshift) { 01046 chipnum++; 01047 ofs = 0; 01048 if (chipnum == private->numchips) { 01049 return 0; 01050 } 01051 chipstart = private->chips[chipnum].start; 01052 } 01053 } 01054 01055 if (len & (map->buswidth - 1)) { 01056 int i = 0, n = 0; 01057 u_char tmp_buf[2]; 01058 __u32 datum; 01059 01060 map->copy_from(map, tmp_buf, 01061 ofs + private->chips[chipnum].start, 01062 map->buswidth); 01063 while (len--) { 01064 tmp_buf[i++] = buf[n++]; 01065 } 01066 01067 if (map->buswidth == 2) { 01068 datum = *(__u16*)tmp_buf; 01069 } else if (map->buswidth == 4) { 01070 datum = *(__u32*)tmp_buf; 01071 } else { 01072 return -EINVAL; /* should never happen, but be safe */ 01073 } 01074 01075 ret = write_one_word(map, &private->chips[chipnum], ofs, datum); 01076 01077 if (ret) { 01078 return ret; 01079 } 01080 01081 (*retlen) += n; 01082 } 01083 01084 return 0; 01085 } 01086 01087 01088 01089 static inline int erase_one_block(struct map_info *map, struct flchip *chip, 01090 unsigned long adr, u_long size) 01091 { 01092 unsigned long timeo = jiffies + HZ; 01093 struct amd_flash_private *private = map->fldrv_priv; 01094 DECLARE_WAITQUEUE(wait, current); 01095 01096 retry: 01097 spin_lock_bh(chip->mutex); 01098 01099 if (chip->state != FL_READY){ 01100 set_current_state(TASK_UNINTERRUPTIBLE); 01101 add_wait_queue(&chip->wq, &wait); 01102 01103 spin_unlock_bh(chip->mutex); 01104 01105 schedule(); 01106 
remove_wait_queue(&chip->wq, &wait); 01107 01108 if (signal_pending(current)) { 01109 return -EINTR; 01110 } 01111 01112 timeo = jiffies + HZ; 01113 01114 goto retry; 01115 } 01116 01117 chip->state = FL_ERASING; 01118 01119 adr += chip->start; 01120 ENABLE_VPP(map); 01121 send_cmd(map, chip->start, CMD_SECTOR_ERASE_UNLOCK_DATA); 01122 send_cmd_to_addr(map, chip->start, CMD_SECTOR_ERASE_UNLOCK_DATA_2, adr); 01123 01124 timeo = jiffies + (HZ * 20); 01125 01126 spin_unlock_bh(chip->mutex); 01127 schedule_timeout(HZ); 01128 spin_lock_bh(chip->mutex); 01129 01130 while (flash_is_busy(map, adr, private->interleave)) { 01131 01132 if (chip->state != FL_ERASING) { 01133 /* Someone's suspended the erase. Sleep */ 01134 set_current_state(TASK_UNINTERRUPTIBLE); 01135 add_wait_queue(&chip->wq, &wait); 01136 01137 spin_unlock_bh(chip->mutex); 01138 printk(KERN_INFO "%s: erase suspended. Sleeping\n", 01139 map->name); 01140 schedule(); 01141 remove_wait_queue(&chip->wq, &wait); 01142 01143 if (signal_pending(current)) { 01144 return -EINTR; 01145 } 01146 01147 timeo = jiffies + (HZ*2); /* FIXME */ 01148 spin_lock_bh(chip->mutex); 01149 continue; 01150 } 01151 01152 /* OK Still waiting */ 01153 if (time_after(jiffies, timeo)) { 01154 chip->state = FL_READY; 01155 spin_unlock_bh(chip->mutex); 01156 printk(KERN_WARNING "%s: waiting for erase to complete " 01157 "timed out.\n", map->name); 01158 DISABLE_VPP(map); 01159 01160 return -EIO; 01161 } 01162 01163 /* Latency issues. 
Drop the lock, wait a while and retry */ 01164 spin_unlock_bh(chip->mutex); 01165 01166 if (need_resched()) 01167 schedule(); 01168 else 01169 udelay(1); 01170 01171 spin_lock_bh(chip->mutex); 01172 } 01173 01174 /* Verify every single word */ 01175 { 01176 int address; 01177 int error = 0; 01178 __u8 verify; 01179 01180 for (address = adr; address < (adr + size); address++) { 01181 if ((verify = map->read8(map, address)) != 0xFF) { 01182 error = 1; 01183 break; 01184 } 01185 } 01186 if (error) { 01187 chip->state = FL_READY; 01188 spin_unlock_bh(chip->mutex); 01189 printk(KERN_WARNING 01190 "%s: verify error at 0x%x, size %ld.\n", 01191 map->name, address, size); 01192 DISABLE_VPP(map); 01193 01194 return -EIO; 01195 } 01196 } 01197 01198 DISABLE_VPP(map); 01199 chip->state = FL_READY; 01200 wake_up(&chip->wq); 01201 spin_unlock_bh(chip->mutex); 01202 01203 return 0; 01204 } 01205 01206 01207 01208 static int amd_flash_erase(struct mtd_info *mtd, struct erase_info *instr) 01209 { 01210 struct map_info *map = mtd->priv; 01211 struct amd_flash_private *private = map->fldrv_priv; 01212 unsigned long adr, len; 01213 int chipnum; 01214 int ret = 0; 01215 int i; 01216 int first; 01217 struct mtd_erase_region_info *regions = mtd->eraseregions; 01218 01219 if (instr->addr > mtd->size) { 01220 return -EINVAL; 01221 } 01222 01223 if ((instr->len + instr->addr) > mtd->size) { 01224 return -EINVAL; 01225 } 01226 01227 /* Check that both start and end of the requested erase are 01228 * aligned with the erasesize at the appropriate addresses. 01229 */ 01230 01231 i = 0; 01232 01233 /* Skip all erase regions which are ended before the start of 01234 the requested erase. Actually, to save on the calculations, 01235 we skip to the first erase region which starts after the 01236 start of the requested erase, and then go back one. 
01237 */ 01238 01239 while ((i < mtd->numeraseregions) && 01240 (instr->addr >= regions[i].offset)) { 01241 i++; 01242 } 01243 i--; 01244 01245 /* OK, now i is pointing at the erase region in which this 01246 * erase request starts. Check the start of the requested 01247 * erase range is aligned with the erase size which is in 01248 * effect here. 01249 */ 01250 01251 if (instr->addr & (regions[i].erasesize-1)) { 01252 return -EINVAL; 01253 } 01254 01255 /* Remember the erase region we start on. */ 01256 01257 first = i; 01258 01259 /* Next, check that the end of the requested erase is aligned 01260 * with the erase region at that address. 01261 */ 01262 01263 while ((i < mtd->numeraseregions) && 01264 ((instr->addr + instr->len) >= regions[i].offset)) { 01265 i++; 01266 } 01267 01268 /* As before, drop back one to point at the region in which 01269 * the address actually falls. 01270 */ 01271 01272 i--; 01273 01274 if ((instr->addr + instr->len) & (regions[i].erasesize-1)) { 01275 return -EINVAL; 01276 } 01277 01278 chipnum = instr->addr >> private->chipshift; 01279 adr = instr->addr - (chipnum << private->chipshift); 01280 len = instr->len; 01281 01282 i = first; 01283 01284 while (len) { 01285 ret = erase_one_block(map, &private->chips[chipnum], adr, 01286 regions[i].erasesize); 01287 01288 if (ret) { 01289 return ret; 01290 } 01291 01292 adr += regions[i].erasesize; 01293 len -= regions[i].erasesize; 01294 01295 if ((adr % (1 << private->chipshift)) == 01296 ((regions[i].offset + (regions[i].erasesize * 01297 regions[i].numblocks)) 01298 % (1 << private->chipshift))) { 01299 i++; 01300 } 01301 01302 if (adr >> private->chipshift) { 01303 adr = 0; 01304 chipnum++; 01305 if (chipnum >= private->numchips) { 01306 break; 01307 } 01308 } 01309 } 01310 01311 instr->state = MTD_ERASE_DONE; 01312 if (instr->callback) { 01313 instr->callback(instr); 01314 } 01315 01316 return 0; 01317 } 01318 01319 01320 01321 static void amd_flash_sync(struct mtd_info *mtd) 01322 { 
01323 struct map_info *map = mtd->priv; 01324 struct amd_flash_private *private = map->fldrv_priv; 01325 int i; 01326 struct flchip *chip; 01327 int ret = 0; 01328 DECLARE_WAITQUEUE(wait, current); 01329 01330 for (i = 0; !ret && (i < private->numchips); i++) { 01331 chip = &private->chips[i]; 01332 01333 retry: 01334 spin_lock_bh(chip->mutex); 01335 01336 switch(chip->state) { 01337 case FL_READY: 01338 case FL_STATUS: 01339 case FL_CFI_QUERY: 01340 case FL_JEDEC_QUERY: 01341 chip->oldstate = chip->state; 01342 chip->state = FL_SYNCING; 01343 /* No need to wake_up() on this state change - 01344 * as the whole point is that nobody can do anything 01345 * with the chip now anyway. 01346 */ 01347 case FL_SYNCING: 01348 spin_unlock_bh(chip->mutex); 01349 break; 01350 01351 default: 01352 /* Not an idle state */ 01353 add_wait_queue(&chip->wq, &wait); 01354 01355 spin_unlock_bh(chip->mutex); 01356 01357 schedule(); 01358 01359 remove_wait_queue(&chip->wq, &wait); 01360 01361 goto retry; 01362 } 01363 } 01364 01365 /* Unlock the chips again */ 01366 for (i--; i >= 0; i--) { 01367 chip = &private->chips[i]; 01368 01369 spin_lock_bh(chip->mutex); 01370 01371 if (chip->state == FL_SYNCING) { 01372 chip->state = chip->oldstate; 01373 wake_up(&chip->wq); 01374 } 01375 spin_unlock_bh(chip->mutex); 01376 } 01377 } 01378 01379 01380 01381 static int amd_flash_suspend(struct mtd_info *mtd) 01382 { 01383 printk("amd_flash_suspend(): not implemented!\n"); 01384 return -EINVAL; 01385 } 01386 01387 01388 01389 static void amd_flash_resume(struct mtd_info *mtd) 01390 { 01391 printk("amd_flash_resume(): not implemented!\n"); 01392 } 01393 01394 01395 01396 static void amd_flash_destroy(struct mtd_info *mtd) 01397 { 01398 struct map_info *map = mtd->priv; 01399 struct amd_flash_private *private = map->fldrv_priv; 01400 kfree(private); 01401 } 01402 01403 int __init amd_flash_init(void) 01404 { 01405 register_mtd_chip_driver(&amd_flash_chipdrv); 01406 return 0; 01407 } 01408 01409 void 
__exit amd_flash_exit(void) 01410 { 01411 unregister_mtd_chip_driver(&amd_flash_chipdrv); 01412 } 01413 01414 module_init(amd_flash_init); 01415 module_exit(amd_flash_exit); 01416 01417 MODULE_LICENSE("GPL"); 01418 MODULE_AUTHOR("Jonas Holmberg <jonas.holmberg@axis.com>"); 01419 MODULE_DESCRIPTION("Old MTD chip driver for AMD flash chips"); Me lo encontre por hay . Creo que es para desbloquear las memorias nombradas en el mismo . Salu2 |
La franja horaria es GMT +2. Ahora son las 19:10. |
Powered por vBulletin™ Version 3.8.10
Copyright © 2024 vBulletin Solutions, Inc. All rights reserved.
Traducido por vBsoporte - vBulletin en español
ZackYFileS - Foros de Debate