b67055adef5de24874aaaa1b3121fcb252b84091
[yaffs2.git] / yaffs_guts.c
1 /*
2  * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
3  *
4  * Copyright (C) 2002-2011 Aleph One Ltd.
5  *   for Toby Churchill Ltd and Brightstar Engineering
6  *
7  * Created by Charles Manning <charles@aleph1.co.uk>
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License version 2 as
11  * published by the Free Software Foundation.
12  */
13
14 #include "yportenv.h"
15 #include "yaffs_trace.h"
16
17 #include "yaffs_guts.h"
18 #include "yaffs_getblockinfo.h"
19 #include "yaffs_tagscompat.h"
20 #include "yaffs_nand.h"
21 #include "yaffs_yaffs1.h"
22 #include "yaffs_yaffs2.h"
23 #include "yaffs_bitmap.h"
24 #include "yaffs_verify.h"
25 #include "yaffs_nand.h"
26 #include "yaffs_packedtags2.h"
27 #include "yaffs_nameval.h"
28 #include "yaffs_allocator.h"
29 #include "yaffs_attribs.h"
30 #include "yaffs_summary.h"
31
32 /* Note YAFFS_GC_GOOD_ENOUGH must be <= YAFFS_GC_PASSIVE_THRESHOLD */
33 #define YAFFS_GC_GOOD_ENOUGH 2
34 #define YAFFS_GC_PASSIVE_THRESHOLD 4
35
36 #include "yaffs_ecc.h"
37
38 /* Forward declarations */
39
40 static int yaffs_wr_data_obj(struct yaffs_obj *in, int inode_chunk,
41                              const u8 *buffer, int n_bytes, int use_reserve);
42
43
44
45 /* Function to calculate chunk and offset */
46
47 void yaffs_addr_to_chunk(struct yaffs_dev *dev, loff_t addr,
48                                 int *chunk_out, u32 *offset_out)
49 {
50         int chunk;
51         u32 offset;
52
53         chunk = (u32) (addr >> dev->chunk_shift);
54
55         if (dev->chunk_div == 1) {
56                 /* easy power of 2 case */
57                 offset = (u32) (addr & dev->chunk_mask);
58         } else {
59                 /* Non power-of-2 case */
60
61                 loff_t chunk_base;
62
63                 chunk /= dev->chunk_div;
64
65                 chunk_base = ((loff_t) chunk) * dev->data_bytes_per_chunk;
66                 offset = (u32) (addr - chunk_base);
67         }
68
69         *chunk_out = chunk;
70         *offset_out = offset;
71 }
72
73 /* Function to return the number of shifts for a power of 2 greater than or
74  * equal to the given number
75  * Note we don't try to cater for all possible numbers and this does not have to
76  * be hellishly efficient.
77  */
78
79 static inline u32 calc_shifts_ceiling(u32 x)
80 {
81         int extra_bits;
82         int shifts;
83
84         shifts = extra_bits = 0;
85
86         while (x > 1) {
87                 if (x & 1)
88                         extra_bits++;
89                 x >>= 1;
90                 shifts++;
91         }
92
93         if (extra_bits)
94                 shifts++;
95
96         return shifts;
97 }
98
99 /* Function to return the number of shifts to get a 1 in bit 0
100  */
101
102 static inline u32 calc_shifts(u32 x)
103 {
104         u32 shifts;
105
106         shifts = 0;
107
108         if (!x)
109                 return 0;
110
111         while (!(x & 1)) {
112                 x >>= 1;
113                 shifts++;
114         }
115
116         return shifts;
117 }
118
119 /*
120  * Temporary buffer manipulations.
121  */
122
123 static int yaffs_init_tmp_buffers(struct yaffs_dev *dev)
124 {
125         int i;
126         u8 *buf = (u8 *) 1;
127
128         memset(dev->temp_buffer, 0, sizeof(dev->temp_buffer));
129
130         for (i = 0; buf && i < YAFFS_N_TEMP_BUFFERS; i++) {
131                 dev->temp_buffer[i].in_use = 0;
132                 buf = kmalloc(dev->param.total_bytes_per_chunk, GFP_NOFS);
133                 dev->temp_buffer[i].buffer = buf;
134         }
135
136         return buf ? YAFFS_OK : YAFFS_FAIL;
137 }
138
139 u8 *yaffs_get_temp_buffer(struct yaffs_dev * dev)
140 {
141         int i;
142
143         dev->temp_in_use++;
144         if (dev->temp_in_use > dev->max_temp)
145                 dev->max_temp = dev->temp_in_use;
146
147         for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
148                 if (dev->temp_buffer[i].in_use == 0) {
149                         dev->temp_buffer[i].in_use = 1;
150                         return dev->temp_buffer[i].buffer;
151                 }
152         }
153
154         yaffs_trace(YAFFS_TRACE_BUFFERS, "Out of temp buffers");
155         /*
156          * If we got here then we have to allocate an unmanaged one
157          * This is not good.
158          */
159
160         dev->unmanaged_buffer_allocs++;
161         return kmalloc(dev->data_bytes_per_chunk, GFP_NOFS);
162
163 }
164
165 void yaffs_release_temp_buffer(struct yaffs_dev *dev, u8 *buffer)
166 {
167         int i;
168
169         dev->temp_in_use--;
170
171         for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
172                 if (dev->temp_buffer[i].buffer == buffer) {
173                         dev->temp_buffer[i].in_use = 0;
174                         return;
175                 }
176         }
177
178         if (buffer) {
179                 /* assume it is an unmanaged one. */
180                 yaffs_trace(YAFFS_TRACE_BUFFERS,
181                         "Releasing unmanaged temp buffer");
182                 kfree(buffer);
183                 dev->unmanaged_buffer_deallocs++;
184         }
185
186 }
187
188 /*
189  * Functions for robustisizing TODO
190  *
191  */
192
/* Hook called after a chunk write succeeds.
 * Currently a no-op placeholder for the robustness handling noted in
 * the TODO above; arguments are voided to silence unused-parameter
 * warnings.
 */
static void yaffs_handle_chunk_wr_ok(struct yaffs_dev *dev, int nand_chunk,
				     const u8 *data,
				     const struct yaffs_ext_tags *tags)
{
	(void) dev;
	(void) nand_chunk;
	(void) data;
	(void) tags;
}
202
/* Hook called when an existing chunk's tags are updated.
 * Currently a no-op placeholder; arguments are voided to silence
 * unused-parameter warnings.
 */
static void yaffs_handle_chunk_update(struct yaffs_dev *dev, int nand_chunk,
				      const struct yaffs_ext_tags *tags)
{
	(void) dev;
	(void) nand_chunk;
	(void) tags;
}
210
211 void yaffs_handle_chunk_error(struct yaffs_dev *dev,
212                               struct yaffs_block_info *bi)
213 {
214         if (!bi->gc_prioritise) {
215                 bi->gc_prioritise = 1;
216                 dev->has_pending_prioritised_gc = 1;
217                 bi->chunk_error_strikes++;
218
219                 if (bi->chunk_error_strikes > 3) {
220                         bi->needs_retiring = 1; /* Too many stikes, so retire */
221                         yaffs_trace(YAFFS_TRACE_ALWAYS,
222                                 "yaffs: Block struck out");
223
224                 }
225         }
226 }
227
/* Handle a failed chunk write.
 * The containing block gets an error strike; if the chunk had passed
 * its erased check (so this was a genuine write failure) the block is
 * marked for retirement.  The chunk is then deleted and the rest of
 * the allocation block is skipped.
 */
static void yaffs_handle_chunk_wr_error(struct yaffs_dev *dev, int nand_chunk,
					int erased_ok)
{
	int flash_block = nand_chunk / dev->param.chunks_per_block;
	struct yaffs_block_info *bi = yaffs_get_block_info(dev, flash_block);

	yaffs_handle_chunk_error(dev, bi);

	if (erased_ok) {
		/* Was an actual write failure,
		 * so mark the block for retirement.*/
		bi->needs_retiring = 1;
		yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
		  "**>> Block %d needs retiring", flash_block);
	}

	/* Delete the chunk */
	yaffs_chunk_del(dev, nand_chunk, 1, __LINE__);
	yaffs_skip_rest_of_block(dev);
}
248
249 /*
250  * Verification code
251  */
252
253 /*
254  *  Simple hash function. Needs to have a reasonable spread
255  */
256
257 static inline int yaffs_hash_fn(int n)
258 {
259         if (n < 0)
260                 n = -n;
261         return n % YAFFS_NOBJECT_BUCKETS;
262 }
263
264 /*
265  * Access functions to useful fake objects.
266  * Note that root might have a presence in NAND if permissions are set.
267  */
268
/* Return the device's root directory object. */
struct yaffs_obj *yaffs_root(struct yaffs_dev *dev)
{
	return dev->root_dir;
}
273
/* Return the device's lost+found directory object. */
struct yaffs_obj *yaffs_lost_n_found(struct yaffs_dev *dev)
{
	return dev->lost_n_found;
}
278
279 /*
280  *  Erased NAND checking functions
281  */
282
283 int yaffs_check_ff(u8 *buffer, int n_bytes)
284 {
285         /* Horrible, slow implementation */
286         while (n_bytes--) {
287                 if (*buffer != 0xff)
288                         return 0;
289                 buffer++;
290         }
291         return 1;
292 }
293
294 static int yaffs_check_chunk_erased(struct yaffs_dev *dev, int nand_chunk)
295 {
296         int retval = YAFFS_OK;
297         u8 *data = yaffs_get_temp_buffer(dev);
298         struct yaffs_ext_tags tags;
299         int result;
300
301         result = yaffs_rd_chunk_tags_nand(dev, nand_chunk, data, &tags);
302
303         if (tags.ecc_result > YAFFS_ECC_RESULT_NO_ERROR)
304                 retval = YAFFS_FAIL;
305
306         if (!yaffs_check_ff(data, dev->data_bytes_per_chunk) ||
307                 tags.chunk_used) {
308                 yaffs_trace(YAFFS_TRACE_NANDACCESS,
309                         "Chunk %d not erased", nand_chunk);
310                 retval = YAFFS_FAIL;
311         }
312
313         yaffs_release_temp_buffer(dev, data);
314
315         return retval;
316
317 }
318
319 static int yaffs_verify_chunk_written(struct yaffs_dev *dev,
320                                       int nand_chunk,
321                                       const u8 *data,
322                                       struct yaffs_ext_tags *tags)
323 {
324         int retval = YAFFS_OK;
325         struct yaffs_ext_tags temp_tags;
326         u8 *buffer = yaffs_get_temp_buffer(dev);
327         int result;
328
329         result = yaffs_rd_chunk_tags_nand(dev, nand_chunk, buffer, &temp_tags);
330         if (memcmp(buffer, data, dev->data_bytes_per_chunk) ||
331             temp_tags.obj_id != tags->obj_id ||
332             temp_tags.chunk_id != tags->chunk_id ||
333             temp_tags.n_bytes != tags->n_bytes)
334                 retval = YAFFS_FAIL;
335
336         yaffs_release_temp_buffer(dev, buffer);
337
338         return retval;
339 }
340
341
342 int yaffs_check_alloc_available(struct yaffs_dev *dev, int n_chunks)
343 {
344         int reserved_chunks;
345         int reserved_blocks = dev->param.n_reserved_blocks;
346         int checkpt_blocks;
347
348         checkpt_blocks = yaffs_calc_checkpt_blocks_required(dev);
349
350         reserved_chunks =
351             (reserved_blocks + checkpt_blocks) * dev->param.chunks_per_block;
352
353         return (dev->n_free_chunks > (reserved_chunks + n_chunks));
354 }
355
/* Find the next empty block to allocate from.
 * Scans with wrap-around, resuming from just past the previous
 * position of alloc_block_finder so allocation rotates through the
 * device.  Returns the block number, or -1 if none is available.
 */
static int yaffs_find_alloc_block(struct yaffs_dev *dev)
{
	int i;
	struct yaffs_block_info *bi;

	if (dev->n_erased_blocks < 1) {
		/* Hoosterman we've got a problem.
		 * Can't get space to gc
		 */
		yaffs_trace(YAFFS_TRACE_ERROR,
		  "yaffs tragedy: no more erased blocks");

		return -1;
	}

	/* Find an empty block. */

	for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
		/* Advance the finder, wrapping back to the start of the
		 * internal block range when it runs off either end. */
		dev->alloc_block_finder++;
		if (dev->alloc_block_finder < dev->internal_start_block
		    || dev->alloc_block_finder > dev->internal_end_block) {
			dev->alloc_block_finder = dev->internal_start_block;
		}

		bi = yaffs_get_block_info(dev, dev->alloc_block_finder);

		if (bi->block_state == YAFFS_BLOCK_STATE_EMPTY) {
			/* Claim the block: bump the device sequence
			 * number and stamp it on the block. */
			bi->block_state = YAFFS_BLOCK_STATE_ALLOCATING;
			dev->seq_number++;
			bi->seq_number = dev->seq_number;
			dev->n_erased_blocks--;
			yaffs_trace(YAFFS_TRACE_ALLOCATE,
			  "Allocated block %d, seq  %d, %d left" ,
			   dev->alloc_block_finder, dev->seq_number,
			   dev->n_erased_blocks);
			return dev->alloc_block_finder;
		}
	}

	/* n_erased_blocks promised a block but the scan found none:
	 * the bookkeeping is inconsistent. */
	yaffs_trace(YAFFS_TRACE_ALWAYS,
		"yaffs tragedy: no more erased blocks, but there should have been %d",
		dev->n_erased_blocks);

	return -1;
}
401
/* Allocate one chunk from the current allocation block.
 * When use_reserver is set the allocation may dip into the reserved
 * space; otherwise it fails once only reserve remains.
 * On success returns the NAND chunk number and, if block_ptr is
 * non-NULL, the block's info; returns -1 on failure.
 */
static int yaffs_alloc_chunk(struct yaffs_dev *dev, int use_reserver,
			     struct yaffs_block_info **block_ptr)
{
	int ret_val;
	struct yaffs_block_info *bi;

	if (dev->alloc_block < 0) {
		/* Get next block to allocate off */
		dev->alloc_block = yaffs_find_alloc_block(dev);
		dev->alloc_page = 0;
	}

	if (!use_reserver && !yaffs_check_alloc_available(dev, 1)) {
		/* No space unless we're allowed to use the reserve. */
		return -1;
	}

	if (dev->n_erased_blocks < dev->param.n_reserved_blocks
	    && dev->alloc_page == 0)
		yaffs_trace(YAFFS_TRACE_ALLOCATE, "Allocating reserve");

	/* Next page please.... */
	if (dev->alloc_block >= 0) {
		bi = yaffs_get_block_info(dev, dev->alloc_block);

		ret_val = (dev->alloc_block * dev->param.chunks_per_block) +
		    dev->alloc_page;
		bi->pages_in_use++;
		/* Mark the chunk as used in the block's chunk bitmap. */
		yaffs_set_chunk_bit(dev, dev->alloc_block, dev->alloc_page);

		dev->alloc_page++;

		dev->n_free_chunks--;

		/* If the block is full set the state to full */
		if (dev->alloc_page >= dev->param.chunks_per_block) {
			bi->block_state = YAFFS_BLOCK_STATE_FULL;
			dev->alloc_block = -1;
		}

		if (block_ptr)
			*block_ptr = bi;

		return ret_val;
	}

	yaffs_trace(YAFFS_TRACE_ERROR,
		"!!!!!!!!! Allocator out !!!!!!!!!!!!!!!!!");

	return -1;
}
453
/* Return the number of erased chunks: whole erased blocks plus
 * whatever remains unwritten in the current allocation block.
 */
static int yaffs_get_erased_chunks(struct yaffs_dev *dev)
{
	int n;

	n = dev->n_erased_blocks * dev->param.chunks_per_block;

	/* NOTE(review): `> 0` means a current allocation block numbered
	 * 0 would not be counted; presumably internal block 0 is never
	 * used for allocation — confirm against dev setup. */
	if (dev->alloc_block > 0)
		n += (dev->param.chunks_per_block - dev->alloc_page);

	return n;

}
466
467 /*
468  * yaffs_skip_rest_of_block() skips over the rest of the allocation block
469  * if we don't want to write to it.
470  */
471 void yaffs_skip_rest_of_block(struct yaffs_dev *dev)
472 {
473         struct yaffs_block_info *bi;
474
475         if (dev->alloc_block > 0) {
476                 bi = yaffs_get_block_info(dev, dev->alloc_block);
477                 if (bi->block_state == YAFFS_BLOCK_STATE_ALLOCATING) {
478                         bi->block_state = YAFFS_BLOCK_STATE_FULL;
479                         dev->alloc_block = -1;
480                 }
481         }
482 }
483
/* Allocate a chunk and write data + tags to it, with erased checking,
 * write verification and retries on failure.
 * Returns the NAND chunk number written, or -1 if nothing could be
 * written.  Any write invalidates the checkpoint.
 */
static int yaffs_write_new_chunk(struct yaffs_dev *dev,
				 const u8 *data,
				 struct yaffs_ext_tags *tags, int use_reserver)
{
	int attempts = 0;
	int write_ok = 0;
	int chunk;

	yaffs2_checkpt_invalidate(dev);

	do {
		struct yaffs_block_info *bi = 0;
		int erased_ok = 0;

		chunk = yaffs_alloc_chunk(dev, use_reserver, &bi);
		if (chunk < 0) {
			/* no space */
			break;
		}

		/* First check this chunk is erased, if it needs
		 * checking.  The checking policy (unless forced
		 * always on) is as follows:
		 *
		 * Check the first page we try to write in a block.
		 * If the check passes then we don't need to check any
		 * more.        If the check fails, we check again...
		 * If the block has been erased, we don't need to check.
		 *
		 * However, if the block has been prioritised for gc,
		 * then we think there might be something odd about
		 * this block and stop using it.
		 *
		 * Rationale: We should only ever see chunks that have
		 * not been erased if there was a partially written
		 * chunk due to power loss.  This checking policy should
		 * catch that case with very few checks and thus save a
		 * lot of checks that are most likely not needed.
		 *
		 * Mods to the above
		 * If an erase check fails or the write fails we skip the
		 * rest of the block.
		 */

		/* let's give it a try */
		attempts++;

		if (dev->param.always_check_erased)
			bi->skip_erased_check = 0;

		if (!bi->skip_erased_check) {
			erased_ok = yaffs_check_chunk_erased(dev, chunk);
			if (erased_ok != YAFFS_OK) {
				yaffs_trace(YAFFS_TRACE_ERROR,
				  "**>> yaffs chunk %d was not erased",
				  chunk);

				/* If not erased, delete this one,
				 * skip rest of block and
				 * try another chunk */
				yaffs_chunk_del(dev, chunk, 1, __LINE__);
				yaffs_skip_rest_of_block(dev);
				continue;
			}
		}

		write_ok = yaffs_wr_chunk_tags_nand(dev, chunk, data, tags);

		/* Read back and verify unless this block has already
		 * proven itself with a passed erased check. */
		if (!bi->skip_erased_check)
			write_ok =
			    yaffs_verify_chunk_written(dev, chunk, data, tags);

		if (write_ok != YAFFS_OK) {
			/* Clean up aborted write, skip to next block and
			 * try another chunk */
			yaffs_handle_chunk_wr_error(dev, chunk, erased_ok);
			continue;
		}

		bi->skip_erased_check = 1;

		/* Copy the data into the robustification buffer */
		yaffs_handle_chunk_wr_ok(dev, chunk, data, tags);

	/* Keep retrying while writes fail, up to yaffs_wr_attempts
	 * tries (non-positive means retry without limit). */
	} while (write_ok != YAFFS_OK &&
		 (yaffs_wr_attempts <= 0 || attempts <= yaffs_wr_attempts));

	if (!write_ok)
		chunk = -1;

	if (attempts > 1) {
		yaffs_trace(YAFFS_TRACE_ERROR,
			"**>> yaffs write required %d attempts",
			attempts);
		dev->n_retried_writes += (attempts - 1);
	}

	return chunk;
}
583
/*
 * Block retiring for handling a broken block.
 * Tries to mark the block bad in NAND; failing that, erases it and
 * writes a bad-block-marker chunk instead.  Either way the block is
 * then marked dead in RAM so it is never used again.
 */

static void yaffs_retire_block(struct yaffs_dev *dev, int flash_block)
{
	struct yaffs_block_info *bi = yaffs_get_block_info(dev, flash_block);

	yaffs2_checkpt_invalidate(dev);

	yaffs2_clear_oldest_dirty_seq(dev, bi);

	if (yaffs_mark_bad(dev, flash_block) != YAFFS_OK) {
		if (yaffs_erase_block(dev, flash_block) != YAFFS_OK) {
			yaffs_trace(YAFFS_TRACE_ALWAYS,
				"yaffs: Failed to mark bad and erase block %d",
				flash_block);
		} else {
			/* Could not mark bad, but the erase worked:
			 * write a chunk carrying the bad-block sequence
			 * number as a software marker. */
			struct yaffs_ext_tags tags;
			int chunk_id =
			    flash_block * dev->param.chunks_per_block;

			u8 *buffer = yaffs_get_temp_buffer(dev);

			memset(buffer, 0xff, dev->data_bytes_per_chunk);
			memset(&tags, 0, sizeof(tags));
			tags.seq_number = YAFFS_SEQUENCE_BAD_BLOCK;
			if (dev->param.write_chunk_tags_fn(dev, chunk_id -
							   dev->chunk_offset,
							   buffer,
							   &tags) != YAFFS_OK)
				yaffs_trace(YAFFS_TRACE_ALWAYS,
					"yaffs: Failed to write bad block marker to block %d",
					flash_block);

			yaffs_release_temp_buffer(dev, buffer);
		}
	}

	bi->block_state = YAFFS_BLOCK_STATE_DEAD;
	bi->gc_prioritise = 0;
	bi->needs_retiring = 0;

	dev->n_retired_blocks++;
}
629
630 /*---------------- Name handling functions ------------*/
631
632 static u16 yaffs_calc_name_sum(const YCHAR *name)
633 {
634         u16 sum = 0;
635         u16 i = 1;
636
637         if (!name)
638                 return 0;
639
640         while ((*name) && i < (YAFFS_MAX_NAME_LENGTH / 2)) {
641
642                 /* 0x1f mask is case insensitive */
643                 sum += ((*name) & 0x1f) * i;
644                 i++;
645                 name++;
646         }
647         return sum;
648 }
649
/* Set an object's cached short name and name checksum.
 * Names that fit within YAFFS_SHORT_NAME_LENGTH are copied into
 * obj->short_name; longer names leave it empty.  The checksum is
 * always recomputed so lookups can skip non-matching objects.
 */
void yaffs_set_obj_name(struct yaffs_obj *obj, const YCHAR * name)
{
	memset(obj->short_name, 0, sizeof(obj->short_name));
	if (name &&
		strnlen(name, YAFFS_SHORT_NAME_LENGTH + 1) <=
		YAFFS_SHORT_NAME_LENGTH)
		strcpy(obj->short_name, name);
	else
		obj->short_name[0] = _Y('\0');
	obj->sum = yaffs_calc_name_sum(name);
}
661
/* Set an object's cached name from an on-NAND object header.
 * With auto-unicode enabled the stored name may need conversion, so
 * it is loaded into a temporary buffer first; otherwise the header
 * name is used directly.
 */
void yaffs_set_obj_name_from_oh(struct yaffs_obj *obj,
				const struct yaffs_obj_hdr *oh)
{
#ifdef CONFIG_YAFFS_AUTO_UNICODE
	YCHAR tmp_name[YAFFS_MAX_NAME_LENGTH + 1];
	memset(tmp_name, 0, sizeof(tmp_name));
	yaffs_load_name_from_oh(obj->my_dev, tmp_name, oh->name,
				YAFFS_MAX_NAME_LENGTH + 1);
	yaffs_set_obj_name(obj, tmp_name);
#else
	yaffs_set_obj_name(obj, oh->name);
#endif
}
675
676 loff_t yaffs_max_file_size(struct yaffs_dev *dev)
677 {
678         if(sizeof(loff_t) < 8)
679                 return YAFFS_MAX_FILE_SIZE_32;
680         else
681                 return ((loff_t) YAFFS_MAX_CHUNK_ID) * dev->data_bytes_per_chunk;
682 }
683
684 /*-------------------- TNODES -------------------
685
686  * List of spare tnodes
687  * The list is hooked together using the first pointer
688  * in the tnode.
689  */
690
691 struct yaffs_tnode *yaffs_get_tnode(struct yaffs_dev *dev)
692 {
693         struct yaffs_tnode *tn = yaffs_alloc_raw_tnode(dev);
694
695         if (tn) {
696                 memset(tn, 0, dev->tnode_size);
697                 dev->n_tnodes++;
698         }
699
700         dev->checkpoint_blocks_required = 0;    /* force recalculation */
701
702         return tn;
703 }
704
/* Free a tnode back to the raw allocator and decrement the tnode
 * count.  The cached checkpoint-size calculation is invalidated
 * since it depends on the number of tnodes. */
static void yaffs_free_tnode(struct yaffs_dev *dev, struct yaffs_tnode *tn)
{
	yaffs_free_raw_tnode(dev, tn);
	dev->n_tnodes--;
	dev->checkpoint_blocks_required = 0;	/* force recalculation */
}
712
/* Tear down the raw tnode and object allocators and reset the
 * object/tnode counters. */
static void yaffs_deinit_tnodes_and_objs(struct yaffs_dev *dev)
{
	yaffs_deinit_raw_tnodes_and_objs(dev);
	dev->n_obj = 0;
	dev->n_tnodes = 0;
}
719
/* Store val at position pos in a level-0 tnode.
 * A level-0 tnode is a packed array of tnode_width-bit entries; an
 * entry may straddle a 32-bit word boundary, in which case the
 * following word holds its high-order bits.
 */
static void yaffs_load_tnode_0(struct yaffs_dev *dev, struct yaffs_tnode *tn,
			unsigned pos, unsigned val)
{
	u32 *map = (u32 *) tn;
	u32 bit_in_map;
	u32 bit_in_word;
	u32 word_in_map;
	u32 mask;

	pos &= YAFFS_TNODES_LEVEL0_MASK;
	/* Group bits are not stored; they are restored on read in
	 * yaffs_get_group_base(). */
	val >>= dev->chunk_grp_bits;

	bit_in_map = pos * dev->tnode_width;
	word_in_map = bit_in_map / 32;
	bit_in_word = bit_in_map & (32 - 1);

	mask = dev->tnode_mask << bit_in_word;

	map[word_in_map] &= ~mask;
	map[word_in_map] |= (mask & (val << bit_in_word));

	if (dev->tnode_width > (32 - bit_in_word)) {
		/* Entry straddles a word boundary: write the remaining
		 * high-order bits into the next word. */
		bit_in_word = (32 - bit_in_word);
		word_in_map++;
		mask =
		    dev->tnode_mask >> bit_in_word;
		map[word_in_map] &= ~mask;
		map[word_in_map] |= (mask & (val >> bit_in_word));
	}
}
750
/* Read the entry at position pos from a level-0 tnode.
 * Reverses yaffs_load_tnode_0(): extracts the packed tnode_width-bit
 * value (handling entries that straddle a 32-bit word boundary) and
 * restores the group shift.
 */
u32 yaffs_get_group_base(struct yaffs_dev *dev, struct yaffs_tnode *tn,
			 unsigned pos)
{
	u32 *map = (u32 *) tn;
	u32 bit_in_map;
	u32 bit_in_word;
	u32 word_in_map;
	u32 val;

	pos &= YAFFS_TNODES_LEVEL0_MASK;

	bit_in_map = pos * dev->tnode_width;
	word_in_map = bit_in_map / 32;
	bit_in_word = bit_in_map & (32 - 1);

	val = map[word_in_map] >> bit_in_word;

	if (dev->tnode_width > (32 - bit_in_word)) {
		/* Entry straddles a word boundary: pull the high-order
		 * bits from the next word. */
		bit_in_word = (32 - bit_in_word);
		word_in_map++;
		val |= (map[word_in_map] << bit_in_word);
	}

	val &= dev->tnode_mask;
	val <<= dev->chunk_grp_bits;

	return val;
}
779
780 /* ------------------- End of individual tnode manipulation -----------------*/
781
782 /* ---------Functions to manipulate the look-up tree (made up of tnodes) ------
783  * The look up tree is represented by the top tnode and the number of top_level
784  * in the tree. 0 means only the level 0 tnode is in the tree.
785  */
786
/* FindLevel0Tnode finds the level 0 tnode for chunk_id, if one
 * exists.  Returns NULL when the tree is not tall enough to address
 * chunk_id, or when the path down the tree is missing a node. */
struct yaffs_tnode *yaffs_find_tnode_0(struct yaffs_dev *dev,
				       struct yaffs_file_var *file_struct,
				       u32 chunk_id)
{
	struct yaffs_tnode *tn = file_struct->top;
	u32 i;
	int required_depth;
	int level = file_struct->top_level;

	(void) dev;

	/* Check sane level and chunk Id */
	if (level < 0 || level > YAFFS_TNODES_MAX_LEVEL)
		return NULL;

	if (chunk_id > YAFFS_MAX_CHUNK_ID)
		return NULL;

	/* First check we're tall enough (ie enough top_level) */

	/* Required depth is the number of internal-tnode levels needed
	 * to address chunk_id beyond the level-0 bits. */
	i = chunk_id >> YAFFS_TNODES_LEVEL0_BITS;
	required_depth = 0;
	while (i) {
		i >>= YAFFS_TNODES_INTERNAL_BITS;
		required_depth++;
	}

	if (required_depth > file_struct->top_level)
		return NULL;	/* Not tall enough, so we can't find it */

	/* Traverse down to level 0, selecting the child slot from the
	 * relevant bit slice of chunk_id at each level. */
	while (level > 0 && tn) {
		tn = tn->internal[(chunk_id >>
				   (YAFFS_TNODES_LEVEL0_BITS +
				    (level - 1) *
				    YAFFS_TNODES_INTERNAL_BITS)) &
				  YAFFS_TNODES_INTERNAL_MASK];
		level--;
	}

	return tn;
}
830
831 /* add_find_tnode_0 finds the level 0 tnode if it exists,
832  * otherwise first expands the tree.
833  * This happens in two steps:
834  *  1. If the tree isn't tall enough, then make it taller.
835  *  2. Scan down the tree towards the level 0 tnode adding tnodes if required.
836  *
837  * Used when modifying the tree.
838  *
839  *  If the tn argument is NULL, then a fresh tnode will be added otherwise the
840  *  specified tn will be plugged into the ttree.
841  */
842
struct yaffs_tnode *yaffs_add_find_tnode_0(struct yaffs_dev *dev,
					   struct yaffs_file_var *file_struct,
					   u32 chunk_id,
					   struct yaffs_tnode *passed_tn)
{
	int required_depth;
	int i;
	int l;
	struct yaffs_tnode *tn;
	u32 x;

	/* Check sane level and page Id */
	if (file_struct->top_level < 0 ||
	    file_struct->top_level > YAFFS_TNODES_MAX_LEVEL)
		return NULL;

	if (chunk_id > YAFFS_MAX_CHUNK_ID)
		return NULL;

	/* First check we're tall enough (ie enough top_level) */

	/* Required depth is the number of internal-tnode levels needed
	 * to address chunk_id beyond the level-0 bits. */
	x = chunk_id >> YAFFS_TNODES_LEVEL0_BITS;
	required_depth = 0;
	while (x) {
		x >>= YAFFS_TNODES_INTERNAL_BITS;
		required_depth++;
	}

	if (required_depth > file_struct->top_level) {
		/* Not tall enough, gotta make the tree taller */
		for (i = file_struct->top_level; i < required_depth; i++) {

			tn = yaffs_get_tnode(dev);

			if (tn) {
				/* New tnode becomes the top; the old
				 * tree hangs off its first slot. */
				tn->internal[0] = file_struct->top;
				file_struct->top = tn;
				file_struct->top_level++;
			} else {
				yaffs_trace(YAFFS_TRACE_ERROR,
					"yaffs: no more tnodes");
				return NULL;
			}
		}
	}

	/* Traverse down to level 0, adding anything we need */

	l = file_struct->top_level;
	tn = file_struct->top;

	if (l > 0) {
		while (l > 0 && tn) {
			/* Child slot is the bit slice of chunk_id that
			 * addresses this level. */
			x = (chunk_id >>
			     (YAFFS_TNODES_LEVEL0_BITS +
			      (l - 1) * YAFFS_TNODES_INTERNAL_BITS)) &
			    YAFFS_TNODES_INTERNAL_MASK;

			if ((l > 1) && !tn->internal[x]) {
				/* Add missing non-level-zero tnode */
				tn->internal[x] = yaffs_get_tnode(dev);
				if (!tn->internal[x])
					return NULL;
			} else if (l == 1) {
				/* Looking from level 1 at level 0 */
				if (passed_tn) {
					/* If we already have one, release it */
					if (tn->internal[x])
						yaffs_free_tnode(dev,
							tn->internal[x]);
					/* Plug the passed tnode in as
					 * the level-0 node. */
					tn->internal[x] = passed_tn;

				} else if (!tn->internal[x]) {
					/* Don't have one, none passed in */
					tn->internal[x] = yaffs_get_tnode(dev);
					if (!tn->internal[x])
						return NULL;
				}
			}

			tn = tn->internal[x];
			l--;
		}
	} else {
		/* top is level 0 */
		if (passed_tn) {
			/* Copy the passed level-0 contents over the
			 * existing top, then free the passed tnode. */
			memcpy(tn, passed_tn,
			       (dev->tnode_width * YAFFS_NTNODES_LEVEL0) / 8);
			yaffs_free_tnode(dev, passed_tn);
		}
	}

	return tn;
}
937
938 static int yaffs_tags_match(const struct yaffs_ext_tags *tags, int obj_id,
939                             int chunk_obj)
940 {
941         return (tags->chunk_id == chunk_obj &&
942                 tags->obj_id == obj_id &&
943                 !tags->is_deleted) ? 1 : 0;
944
945 }
946
947 static int yaffs_find_chunk_in_group(struct yaffs_dev *dev, int the_chunk,
948                                         struct yaffs_ext_tags *tags, int obj_id,
949                                         int inode_chunk)
950 {
951         int j;
952
953         for (j = 0; the_chunk && j < dev->chunk_grp_size; j++) {
954                 if (yaffs_check_chunk_bit
955                     (dev, the_chunk / dev->param.chunks_per_block,
956                      the_chunk % dev->param.chunks_per_block)) {
957
958                         if (dev->chunk_grp_size == 1)
959                                 return the_chunk;
960                         else {
961                                 yaffs_rd_chunk_tags_nand(dev, the_chunk, NULL,
962                                                          tags);
963                                 if (yaffs_tags_match(tags,
964                                                         obj_id, inode_chunk)) {
965                                         /* found it; */
966                                         return the_chunk;
967                                 }
968                         }
969                 }
970                 the_chunk++;
971         }
972         return -1;
973 }
974
975 static int yaffs_find_chunk_in_file(struct yaffs_obj *in, int inode_chunk,
976                                     struct yaffs_ext_tags *tags)
977 {
978         /*Get the Tnode, then get the level 0 offset chunk offset */
979         struct yaffs_tnode *tn;
980         int the_chunk = -1;
981         struct yaffs_ext_tags local_tags;
982         int ret_val = -1;
983         struct yaffs_dev *dev = in->my_dev;
984
985         if (!tags) {
986                 /* Passed a NULL, so use our own tags space */
987                 tags = &local_tags;
988         }
989
990         tn = yaffs_find_tnode_0(dev, &in->variant.file_variant, inode_chunk);
991
992         if (!tn)
993                 return ret_val;
994
995         the_chunk = yaffs_get_group_base(dev, tn, inode_chunk);
996
997         ret_val = yaffs_find_chunk_in_group(dev, the_chunk, tags, in->obj_id,
998                                               inode_chunk);
999         return ret_val;
1000 }
1001
1002 static int yaffs_find_del_file_chunk(struct yaffs_obj *in, int inode_chunk,
1003                                      struct yaffs_ext_tags *tags)
1004 {
1005         /* Get the Tnode, then get the level 0 offset chunk offset */
1006         struct yaffs_tnode *tn;
1007         int the_chunk = -1;
1008         struct yaffs_ext_tags local_tags;
1009         struct yaffs_dev *dev = in->my_dev;
1010         int ret_val = -1;
1011
1012         if (!tags) {
1013                 /* Passed a NULL, so use our own tags space */
1014                 tags = &local_tags;
1015         }
1016
1017         tn = yaffs_find_tnode_0(dev, &in->variant.file_variant, inode_chunk);
1018
1019         if (!tn)
1020                 return ret_val;
1021
1022         the_chunk = yaffs_get_group_base(dev, tn, inode_chunk);
1023
1024         ret_val = yaffs_find_chunk_in_group(dev, the_chunk, tags, in->obj_id,
1025                                               inode_chunk);
1026
1027         /* Delete the entry in the filestructure (if found) */
1028         if (ret_val != -1)
1029                 yaffs_load_tnode_0(dev, tn, inode_chunk, 0);
1030
1031         return ret_val;
1032 }
1033
/* Record that NAND chunk nand_chunk now holds logical chunk inode_chunk
 * of file in, growing the tnode tree as required.
 * Returns YAFFS_OK, or YAFFS_FAIL if a tnode could not be allocated.
 * During scanning this also resolves duplicate mappings left behind by
 * a power failure mid-write (see in_scan notes below).
 */
int yaffs_put_chunk_in_file(struct yaffs_obj *in, int inode_chunk,
			    int nand_chunk, int in_scan)
{
	/* NB in_scan is zero unless scanning.
	 * For forward scanning, in_scan is > 0;
	 * for backward scanning in_scan is < 0
	 *
	 * nand_chunk = 0 is a dummy insert to make sure the tnodes are there.
	 */

	struct yaffs_tnode *tn;
	struct yaffs_dev *dev = in->my_dev;
	int existing_cunk;
	struct yaffs_ext_tags existing_tags;
	struct yaffs_ext_tags new_tags;
	unsigned existing_serial, new_serial;

	if (in->variant_type != YAFFS_OBJECT_TYPE_FILE) {
		/* Just ignore an attempt at putting a chunk into a non-file
		 * during scanning.
		 * If it is not during Scanning then something went wrong!
		 */
		if (!in_scan) {
			yaffs_trace(YAFFS_TRACE_ERROR,
				"yaffs tragedy:attempt to put data chunk into a non-file"
				);
			BUG();
		}

		yaffs_chunk_del(dev, nand_chunk, 1, __LINE__);
		return YAFFS_OK;
	}

	/* Make sure the path of tnodes down to level 0 exists. */
	tn = yaffs_add_find_tnode_0(dev,
				    &in->variant.file_variant,
				    inode_chunk, NULL);
	if (!tn)
		return YAFFS_FAIL;

	if (!nand_chunk)
		/* Dummy insert, bail now */
		return YAFFS_OK;

	/* Non-zero means this logical chunk already maps to a group. */
	existing_cunk = yaffs_get_group_base(dev, tn, inode_chunk);

	if (in_scan != 0) {
		/* If we're scanning then we need to test for duplicates
		 * NB This does not need to be efficient since it should only
		 * happen when the power fails during a write, then only one
		 * chunk should ever be affected.
		 *
		 * Correction for YAFFS2: This could happen quite a lot and we
		 * need to think about efficiency! TODO
		 * Update: For backward scanning we don't need to re-read tags
		 * so this is quite cheap.
		 */

		if (existing_cunk > 0) {
			/* NB Right now existing chunk will not be real
			 * chunk_id if the chunk group size > 1
			 * thus we have to do a FindChunkInFile to get the
			 * real chunk id.
			 *
			 * We have a duplicate now we need to decide which
			 * one to use:
			 *
			 * Backwards scanning YAFFS2: The old one is what
			 * we use, dump the new one.
			 * YAFFS1: Get both sets of tags and compare serial
			 * numbers.
			 */

			if (in_scan > 0) {
				/* Only do this for forward scanning */
				yaffs_rd_chunk_tags_nand(dev,
							 nand_chunk,
							 NULL, &new_tags);

				/* Do a proper find */
				existing_cunk =
				    yaffs_find_chunk_in_file(in, inode_chunk,
							     &existing_tags);
			}

			if (existing_cunk <= 0) {
				/*Hoosterman - how did this happen? */

				yaffs_trace(YAFFS_TRACE_ERROR,
					"yaffs tragedy: existing chunk < 0 in scan"
					);

			}

			/* NB The deleted flags should be false, otherwise
			 * the chunks will not be loaded during a scan
			 */

			/* Serials are only read for forward scanning; the
			 * condition below short-circuits on in_scan > 0
			 * before touching them, so they are never read
			 * uninitialised.
			 */
			if (in_scan > 0) {
				new_serial = new_tags.serial_number;
				existing_serial = existing_tags.serial_number;
			}

			/* Serial numbers are 2-bit; the newer copy is the
			 * one whose serial is (existing + 1) mod 4.
			 */
			if ((in_scan > 0) &&
			    (existing_cunk <= 0 ||
			     ((existing_serial + 1) & 3) == new_serial)) {
				/* Forward scanning.
				 * Use new
				 * Delete the old one and drop through to
				 * update the tnode
				 */
				yaffs_chunk_del(dev, existing_cunk, 1,
						__LINE__);
			} else {
				/* Backward scanning or we want to use the
				 * existing one
				 * Delete the new one and return early so that
				 * the tnode isn't changed
				 */
				yaffs_chunk_del(dev, nand_chunk, 1, __LINE__);
				return YAFFS_OK;
			}
		}

	}

	/* First mapping for this logical chunk: bump the data chunk count. */
	if (existing_cunk == 0)
		in->n_data_chunks++;

	yaffs_load_tnode_0(dev, tn, inode_chunk, nand_chunk);

	return YAFFS_OK;
}
1166
1167 static void yaffs_soft_del_chunk(struct yaffs_dev *dev, int chunk)
1168 {
1169         struct yaffs_block_info *the_block;
1170         unsigned block_no;
1171
1172         yaffs_trace(YAFFS_TRACE_DELETION, "soft delete chunk %d", chunk);
1173
1174         block_no = chunk / dev->param.chunks_per_block;
1175         the_block = yaffs_get_block_info(dev, block_no);
1176         if (the_block) {
1177                 the_block->soft_del_pages++;
1178                 dev->n_free_chunks++;
1179                 yaffs2_update_oldest_dirty_seq(dev, block_no, the_block);
1180         }
1181 }
1182
1183 /* SoftDeleteWorker scans backwards through the tnode tree and soft deletes all
1184  * the chunks in the file.
1185  * All soft deleting does is increment the block's softdelete count and pulls
1186  * the chunk out of the tnode.
1187  * Thus, essentially this is the same as DeleteWorker except that the chunks
1188  * are soft deleted.
1189  */
1190
/* Recursive helper: soft delete every chunk reachable from tn and free
 * the tnodes as their subtrees empty. Returns 1 when the whole subtree
 * was processed and freed, 0 if anything remains (see "Can this
 * happen?" below).
 */
static int yaffs_soft_del_worker(struct yaffs_obj *in, struct yaffs_tnode *tn,
				 u32 level, int chunk_offset)
{
	int i;
	int the_chunk;
	int all_done = 1;
	struct yaffs_dev *dev = in->my_dev;

	/* An empty subtree counts as done. */
	if (!tn)
		return 1;

	if (level > 0) {
		/* Internal level: recurse into children, highest index
		 * first, freeing each child tnode once it is emptied.
		 */
		for (i = YAFFS_NTNODES_INTERNAL - 1;
			all_done && i >= 0;
			i--) {
			if (tn->internal[i]) {
				all_done =
				    yaffs_soft_del_worker(in,
					tn->internal[i],
					level - 1,
					(chunk_offset <<
					YAFFS_TNODES_INTERNAL_BITS)
					+ i);
				if (all_done) {
					yaffs_free_tnode(dev,
						tn->internal[i]);
					tn->internal[i] = NULL;
				} else {
					/* Can this happen? */
				}
			}
		}
		return (all_done) ? 1 : 0;
	}

	/* level 0: soft delete each mapped chunk and zero its entry. */
	 for (i = YAFFS_NTNODES_LEVEL0 - 1; i >= 0; i--) {
		the_chunk = yaffs_get_group_base(dev, tn, i);
		if (the_chunk) {
			yaffs_soft_del_chunk(dev, the_chunk);
			yaffs_load_tnode_0(dev, tn, i, 0);
		}
	}
	return 1;
}
1236
1237 static void yaffs_remove_obj_from_dir(struct yaffs_obj *obj)
1238 {
1239         struct yaffs_dev *dev = obj->my_dev;
1240         struct yaffs_obj *parent;
1241
1242         yaffs_verify_obj_in_dir(obj);
1243         parent = obj->parent;
1244
1245         yaffs_verify_dir(parent);
1246
1247         if (dev && dev->param.remove_obj_fn)
1248                 dev->param.remove_obj_fn(obj);
1249
1250         list_del_init(&obj->siblings);
1251         obj->parent = NULL;
1252
1253         yaffs_verify_dir(parent);
1254 }
1255
1256 void yaffs_add_obj_to_dir(struct yaffs_obj *directory, struct yaffs_obj *obj)
1257 {
1258         if (!directory) {
1259                 yaffs_trace(YAFFS_TRACE_ALWAYS,
1260                         "tragedy: Trying to add an object to a null pointer directory"
1261                         );
1262                 BUG();
1263                 return;
1264         }
1265         if (directory->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
1266                 yaffs_trace(YAFFS_TRACE_ALWAYS,
1267                         "tragedy: Trying to add an object to a non-directory"
1268                         );
1269                 BUG();
1270         }
1271
1272         if (obj->siblings.prev == NULL) {
1273                 /* Not initialised */
1274                 BUG();
1275         }
1276
1277         yaffs_verify_dir(directory);
1278
1279         yaffs_remove_obj_from_dir(obj);
1280
1281         /* Now add it */
1282         list_add(&obj->siblings, &directory->variant.dir_variant.children);
1283         obj->parent = directory;
1284
1285         if (directory == obj->my_dev->unlinked_dir
1286             || directory == obj->my_dev->del_dir) {
1287                 obj->unlinked = 1;
1288                 obj->my_dev->n_unlinked_files++;
1289                 obj->rename_allowed = 0;
1290         }
1291
1292         yaffs_verify_dir(directory);
1293         yaffs_verify_obj_in_dir(obj);
1294 }
1295
/* Rename obj to new_name and/or move it to new_dir (NULL keeps the
 * current parent). Handles moves into the special unlinked/deleted
 * directories, where duplicate names are permitted.
 * Returns YAFFS_OK on success, YAFFS_FAIL if the target name already
 * exists (and no override applies) or the header update fails.
 */
static int yaffs_change_obj_name(struct yaffs_obj *obj,
				 struct yaffs_obj *new_dir,
				 const YCHAR *new_name, int force, int shadows)
{
	int unlink_op;
	int del_op;
	struct yaffs_obj *existing_target;

	if (new_dir == NULL)
		new_dir = obj->parent;	/* use the old directory */

	if (new_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
		yaffs_trace(YAFFS_TRACE_ALWAYS,
			"tragedy: yaffs_change_obj_name: new_dir is not a directory"
			);
		BUG();
	}

	/* Is this a move into one of the special pseudo-directories? */
	unlink_op = (new_dir == obj->my_dev->unlinked_dir);
	del_op = (new_dir == obj->my_dev->del_dir);

	existing_target = yaffs_find_by_name(new_dir, new_name);

	/* If the object is a file going into the unlinked directory,
	 *   then it is OK to just stuff it in since duplicate names are OK.
	 *   else only proceed if the new name does not exist and we're putting
	 *   it into a directory.
	 */
	if (!(unlink_op || del_op || force ||
	      shadows > 0 || !existing_target) ||
	      new_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY)
		return YAFFS_FAIL;

	yaffs_set_obj_name(obj, new_name);
	obj->dirty = 1;
	yaffs_add_obj_to_dir(new_dir, obj);

	if (unlink_op)
		obj->unlinked = 1;

	/* If it is a deletion then we mark it as a shrink for gc  */
	if (yaffs_update_oh(obj, new_name, 0, del_op, shadows, NULL) >= 0)
		return YAFFS_OK;

	return YAFFS_FAIL;
}
1342
1343 /*------------------------ Short Operations Cache ------------------------------
1344  *   In many situations where there is no high level buffering  a lot of
1345  *   reads might be short sequential reads, and a lot of writes may be short
1346  *   sequential writes. eg. scanning/writing a jpeg file.
 *   In these cases, a short read/write cache can provide a huge performance
 *   benefit with dumb-as-a-rock code.
1349  *   In Linux, the page cache provides read buffering and the short op cache
1350  *   provides write buffering.
1351  *
1352  *   There are a small number (~10) of cache chunks per device so that we don't
1353  *   need a very intelligent search.
1354  */
1355
1356 static int yaffs_obj_cache_dirty(struct yaffs_obj *obj)
1357 {
1358         struct yaffs_dev *dev = obj->my_dev;
1359         int i;
1360         struct yaffs_cache *cache;
1361         int n_caches = obj->my_dev->param.n_caches;
1362
1363         for (i = 0; i < n_caches; i++) {
1364                 cache = &dev->cache[i];
1365                 if (cache->object == obj && cache->dirty)
1366                         return 1;
1367         }
1368
1369         return 0;
1370 }
1371
/* Write out every dirty short-op cache entry belonging to obj, lowest
 * chunk first, freeing the entries as they are written. Traces an
 * error (disk full) if a dirty entry could not be written out.
 */
static void yaffs_flush_file_cache(struct yaffs_obj *obj)
{
	struct yaffs_dev *dev = obj->my_dev;
	int lowest = -99;	/* Stop compiler whining. */
	int i;
	struct yaffs_cache *cache;
	int chunk_written = 0;
	int n_caches = obj->my_dev->param.n_caches;

	if (n_caches < 1)
		return;
	do {
		cache = NULL;

		/* Find the lowest dirty chunk for this object.
		 * (lowest is only compared after the first hit sets it,
		 * thanks to the !cache short-circuit below.)
		 */
		for (i = 0; i < n_caches; i++) {
			if (dev->cache[i].object == obj &&
			    dev->cache[i].dirty) {
				if (!cache ||
				    dev->cache[i].chunk_id < lowest) {
					cache = &dev->cache[i];
					lowest = cache->chunk_id;
				}
			}
		}

		if (cache && !cache->locked) {
			/* Write it out and free it up */
			chunk_written =
			    yaffs_wr_data_obj(cache->object,
					      cache->chunk_id,
					      cache->data,
					      cache->n_bytes, 1);
			cache->dirty = 0;
			cache->object = NULL;
		}
		/* Loop while entries remain and the last write succeeded. */
	} while (cache && chunk_written > 0);

	if (cache)
		/* Hoosterman, disk full while writing cache out. */
		yaffs_trace(YAFFS_TRACE_ERROR,
			"yaffs tragedy: no space during cache write");
}
1415
/* yaffs_flush_whole_cache(dev)
 *
 * Flush all the dirty short-op cache entries on the device.
 */
1420
1421 void yaffs_flush_whole_cache(struct yaffs_dev *dev)
1422 {
1423         struct yaffs_obj *obj;
1424         int n_caches = dev->param.n_caches;
1425         int i;
1426
1427         /* Find a dirty object in the cache and flush it...
1428          * until there are no further dirty objects.
1429          */
1430         do {
1431                 obj = NULL;
1432                 for (i = 0; i < n_caches && !obj; i++) {
1433                         if (dev->cache[i].object && dev->cache[i].dirty)
1434                                 obj = dev->cache[i].object;
1435                 }
1436                 if (obj)
1437                         yaffs_flush_file_cache(obj);
1438         } while (obj);
1439
1440 }
1441
1442 /* Grab us a cache chunk for use.
1443  * First look for an empty one.
1444  * Then look for the least recently used non-dirty one.
1445  * Then look for the least recently used dirty one...., flush and look again.
1446  */
1447 static struct yaffs_cache *yaffs_grab_chunk_worker(struct yaffs_dev *dev)
1448 {
1449         int i;
1450
1451         if (dev->param.n_caches > 0) {
1452                 for (i = 0; i < dev->param.n_caches; i++) {
1453                         if (!dev->cache[i].object)
1454                                 return &dev->cache[i];
1455                 }
1456         }
1457         return NULL;
1458 }
1459
/* Obtain a cache entry for use: first try a free one; failing that,
 * evict via LRU — flush the object owning the least recently used
 * unlocked entry, then retry. May return NULL if nothing could be
 * freed (e.g. all entries locked or flush failed).
 */
static struct yaffs_cache *yaffs_grab_chunk_cache(struct yaffs_dev *dev)
{
	struct yaffs_cache *cache;
	struct yaffs_obj *the_obj;
	int usage;
	int i;
	int pushout;

	if (dev->param.n_caches < 1)
		return NULL;

	/* Try find a non-dirty one... */

	cache = yaffs_grab_chunk_worker(dev);

	if (!cache) {
		/* They were all dirty, find the LRU object and flush
		 * its cache, then  find again.
		 * NB what's here is not very accurate,
		 * we actually flush the object with the LRU chunk.
		 */

		/* With locking we can't assume we can use entry zero,
		 * Set the_obj to a valid pointer for Coverity. */
		the_obj = dev->cache[0].object;
		usage = -1;
		cache = NULL;
		pushout = -1;

		/* Scan for the unlocked entry with the smallest last_use.
		 * The !cache alternative accepts the first candidate
		 * before usage has been meaningfully set.
		 */
		for (i = 0; i < dev->param.n_caches; i++) {
			if (dev->cache[i].object &&
			    !dev->cache[i].locked &&
			    (dev->cache[i].last_use < usage ||
			    !cache)) {
				usage = dev->cache[i].last_use;
				the_obj = dev->cache[i].object;
				cache = &dev->cache[i];
				pushout = i;
			}
		}

		if (!cache || cache->dirty) {
			/* Flush and try again */
			yaffs_flush_file_cache(the_obj);
			cache = yaffs_grab_chunk_worker(dev);
		}
	}
	return cache;
}
1509
1510 /* Find a cached chunk */
1511 static struct yaffs_cache *yaffs_find_chunk_cache(const struct yaffs_obj *obj,
1512                                                   int chunk_id)
1513 {
1514         struct yaffs_dev *dev = obj->my_dev;
1515         int i;
1516
1517         if (dev->param.n_caches < 1)
1518                 return NULL;
1519
1520         for (i = 0; i < dev->param.n_caches; i++) {
1521                 if (dev->cache[i].object == obj &&
1522                     dev->cache[i].chunk_id == chunk_id) {
1523                         dev->cache_hits++;
1524
1525                         return &dev->cache[i];
1526                 }
1527         }
1528         return NULL;
1529 }
1530
/* Mark the chunk for the least recently used algorithm */
1532 static void yaffs_use_cache(struct yaffs_dev *dev, struct yaffs_cache *cache,
1533                             int is_write)
1534 {
1535         int i;
1536
1537         if (dev->param.n_caches < 1)
1538                 return;
1539
1540         if (dev->cache_last_use < 0 ||
1541                 dev->cache_last_use > 100000000) {
1542                 /* Reset the cache usages */
1543                 for (i = 1; i < dev->param.n_caches; i++)
1544                         dev->cache[i].last_use = 0;
1545
1546                 dev->cache_last_use = 0;
1547         }
1548         dev->cache_last_use++;
1549         cache->last_use = dev->cache_last_use;
1550
1551         if (is_write)
1552                 cache->dirty = 1;
1553 }
1554
1555 /* Invalidate a single cache page.
1556  * Do this when a whole page gets written,
1557  * ie the short cache for this page is no longer valid.
1558  */
1559 static void yaffs_invalidate_chunk_cache(struct yaffs_obj *object, int chunk_id)
1560 {
1561         struct yaffs_cache *cache;
1562
1563         if (object->my_dev->param.n_caches > 0) {
1564                 cache = yaffs_find_chunk_cache(object, chunk_id);
1565
1566                 if (cache)
1567                         cache->object = NULL;
1568         }
1569 }
1570
/* Invalidate all the cache pages associated with this object
 * Do this whenever the file is deleted or resized.
 */
1574 static void yaffs_invalidate_whole_cache(struct yaffs_obj *in)
1575 {
1576         int i;
1577         struct yaffs_dev *dev = in->my_dev;
1578
1579         if (dev->param.n_caches > 0) {
1580                 /* Invalidate it. */
1581                 for (i = 0; i < dev->param.n_caches; i++) {
1582                         if (dev->cache[i].object == in)
1583                                 dev->cache[i].object = NULL;
1584                 }
1585         }
1586 }
1587
1588 static void yaffs_unhash_obj(struct yaffs_obj *obj)
1589 {
1590         int bucket;
1591         struct yaffs_dev *dev = obj->my_dev;
1592
1593         /* If it is still linked into the bucket list, free from the list */
1594         if (!list_empty(&obj->hash_link)) {
1595                 list_del_init(&obj->hash_link);
1596                 bucket = yaffs_hash_fn(obj->obj_id);
1597                 dev->obj_bucket[bucket].count--;
1598         }
1599 }
1600
/*  FreeObject frees up an Object and puts it back on the free list */
/*  FreeObject frees up a Object and puts it back on the free list.
 * The object must already be detached from any directory (parent NULL,
 * sibling list empty). If an OS inode still references it, the free
 * is deferred until yaffs_handle_defered_free().
 */
static void yaffs_free_obj(struct yaffs_obj *obj)
{
	struct yaffs_dev *dev;

	if (!obj) {
		BUG();
		return;
	}
	dev = obj->my_dev;
	yaffs_trace(YAFFS_TRACE_OS, "FreeObject %p inode %p",
		obj, obj->my_inode);
	/* Freeing an object still in a directory would corrupt lists. */
	if (obj->parent)
		BUG();
	if (!list_empty(&obj->siblings))
		BUG();

	if (obj->my_inode) {
		/* We're still hooked up to a cached inode.
		 * Don't delete now, but mark for later deletion
		 */
		obj->defered_free = 1;
		return;
	}

	yaffs_unhash_obj(obj);

	yaffs_free_raw_obj(dev, obj);
	dev->n_obj--;
	dev->checkpoint_blocks_required = 0;	/* force recalculation */
}
1632
1633 void yaffs_handle_defered_free(struct yaffs_obj *obj)
1634 {
1635         if (obj->defered_free)
1636                 yaffs_free_obj(obj);
1637 }
1638
1639 static int yaffs_generic_obj_del(struct yaffs_obj *in)
1640 {
1641         /* Iinvalidate the file's data in the cache, without flushing. */
1642         yaffs_invalidate_whole_cache(in);
1643
1644         if (in->my_dev->param.is_yaffs2 && in->parent != in->my_dev->del_dir) {
1645                 /* Move to unlinked directory so we have a deletion record */
1646                 yaffs_change_obj_name(in, in->my_dev->del_dir, _Y("deleted"), 0,
1647                                       0);
1648         }
1649
1650         yaffs_remove_obj_from_dir(in);
1651         yaffs_chunk_del(in->my_dev, in->hdr_chunk, 1, __LINE__);
1652         in->hdr_chunk = 0;
1653
1654         yaffs_free_obj(in);
1655         return YAFFS_OK;
1656
1657 }
1658
1659 static void yaffs_soft_del_file(struct yaffs_obj *obj)
1660 {
1661         if (!obj->deleted ||
1662             obj->variant_type != YAFFS_OBJECT_TYPE_FILE ||
1663             obj->soft_del)
1664                 return;
1665
1666         if (obj->n_data_chunks <= 0) {
1667                 /* Empty file with no duplicate object headers,
1668                  * just delete it immediately */
1669                 yaffs_free_tnode(obj->my_dev, obj->variant.file_variant.top);
1670                 obj->variant.file_variant.top = NULL;
1671                 yaffs_trace(YAFFS_TRACE_TRACING,
1672                         "yaffs: Deleting empty file %d",
1673                         obj->obj_id);
1674                 yaffs_generic_obj_del(obj);
1675         } else {
1676                 yaffs_soft_del_worker(obj,
1677                                       obj->variant.file_variant.top,
1678                                       obj->variant.
1679                                       file_variant.top_level, 0);
1680                 obj->soft_del = 1;
1681         }
1682 }
1683
1684 /* Pruning removes any part of the file structure tree that is beyond the
1685  * bounds of the file (ie that does not point to chunks).
1686  *
1687  * A file should only get pruned when its size is reduced.
1688  *
1689  * Before pruning, the chunks must be pulled from the tree and the
1690  * level 0 tnode entries must be zeroed out.
1691  * Could also use this for file deletion, but that's probably better handled
1692  * by a special case.
1693  *
1694  * This function is recursive. For levels > 0 the function is called again on
1695  * any sub-tree. For level == 0 we just check if the sub-tree has data.
1696  * If there is no data in a subtree then it is pruned.
1697  */
1698
/* Recursive pruning helper: free any subtree of tn that holds no chunk
 * data. del0 controls whether this (sub)tree may itself be freed when
 * empty — the 0th branch of the top of the tree is kept (del0 == 0 at
 * the root) so tree-height trimming stays the caller's job.
 * Returns the possibly-freed (NULL) tnode pointer.
 */
static struct yaffs_tnode *yaffs_prune_worker(struct yaffs_dev *dev,
					      struct yaffs_tnode *tn, u32 level,
					      int del0)
{
	int i;
	int has_data;

	if (!tn)
		return tn;

	has_data = 0;

	if (level > 0) {
		/* Internal level: prune children first, then count the
		 * survivors.
		 */
		for (i = 0; i < YAFFS_NTNODES_INTERNAL; i++) {
			if (tn->internal[i]) {
				tn->internal[i] =
				    yaffs_prune_worker(dev,
						tn->internal[i],
						level - 1,
						(i == 0) ? del0 : 1);
			}

			if (tn->internal[i])
				has_data++;
		}
	} else {
		/* Level 0: scan the raw tnode words for any non-zero
		 * chunk group entry.
		 */
		int tnode_size_u32 = dev->tnode_size / sizeof(u32);
		u32 *map = (u32 *) tn;

		for (i = 0; !has_data && i < tnode_size_u32; i++) {
			if (map[i])
				has_data++;
		}
	}

	if (has_data == 0 && del0) {
		/* Free and return NULL */
		yaffs_free_tnode(dev, tn);
		tn = NULL;
	}
	return tn;
}
1741
/* Prune a file's tnode tree after a size reduction: free empty
 * sub-trees, then lower the tree height where possible.
 * Always returns YAFFS_OK.
 */
static int yaffs_prune_tree(struct yaffs_dev *dev,
			    struct yaffs_file_var *file_struct)
{
	int i;
	int has_data;
	int done = 0;
	struct yaffs_tnode *tn;

	/* A height-0 tree (single level-0 tnode) has nothing to prune. */
	if (file_struct->top_level < 1)
		return YAFFS_OK;

	/* Pass 1: free all empty sub-trees, but keep the 0th branch at
	 * each level (del0 == 0) so the height stays unchanged for now.
	 */
	file_struct->top =
	   yaffs_prune_worker(dev, file_struct->top, file_struct->top_level, 0);

	/* Now we have a tree with all the non-zero branches NULL but
	 * the height is the same as it was.
	 * Let's see if we can trim internal tnodes to shorten the tree.
	 * We can do this if only the 0th element in the tnode is in use
	 * (ie all the non-zero are NULL)
	 */

	while (file_struct->top_level && !done) {
		tn = file_struct->top;

		has_data = 0;
		for (i = 1; i < YAFFS_NTNODES_INTERNAL; i++) {
			if (tn->internal[i])
				has_data++;
		}

		if (!has_data) {
			/* Only slot 0 in use: promote it to be the new
			 * top and drop one level of height.
			 */
			file_struct->top = tn->internal[0];
			file_struct->top_level--;
			yaffs_free_tnode(dev, tn);
		} else {
			done = 1;
		}
	}

	return YAFFS_OK;
}
1783
1784 /*-------------------- End of File Structure functions.-------------------*/
1785
1786 /* alloc_empty_obj gets us a clean Object.*/
1787 static struct yaffs_obj *yaffs_alloc_empty_obj(struct yaffs_dev *dev)
1788 {
1789         struct yaffs_obj *obj = yaffs_alloc_raw_obj(dev);
1790
1791         if (!obj)
1792                 return obj;
1793
1794         dev->n_obj++;
1795
1796         /* Now sweeten it up... */
1797
1798         memset(obj, 0, sizeof(struct yaffs_obj));
1799         obj->being_created = 1;
1800
1801         obj->my_dev = dev;
1802         obj->hdr_chunk = 0;
1803         obj->variant_type = YAFFS_OBJECT_TYPE_UNKNOWN;
1804         INIT_LIST_HEAD(&(obj->hard_links));
1805         INIT_LIST_HEAD(&(obj->hash_link));
1806         INIT_LIST_HEAD(&obj->siblings);
1807
1808         /* Now make the directory sane */
1809         if (dev->root_dir) {
1810                 obj->parent = dev->root_dir;
1811                 list_add(&(obj->siblings),
1812                          &dev->root_dir->variant.dir_variant.children);
1813         }
1814
1815         /* Add it to the lost and found directory.
1816          * NB Can't put root or lost-n-found in lost-n-found so
1817          * check if lost-n-found exists first
1818          */
1819         if (dev->lost_n_found)
1820                 yaffs_add_obj_to_dir(dev->lost_n_found, obj);
1821
1822         obj->being_created = 0;
1823
1824         dev->checkpoint_blocks_required = 0;    /* force recalculation */
1825
1826         return obj;
1827 }
1828
1829 static int yaffs_find_nice_bucket(struct yaffs_dev *dev)
1830 {
1831         int i;
1832         int l = 999;
1833         int lowest = 999999;
1834
1835         /* Search for the shortest list or one that
1836          * isn't too long.
1837          */
1838
1839         for (i = 0; i < 10 && lowest > 4; i++) {
1840                 dev->bucket_finder++;
1841                 dev->bucket_finder %= YAFFS_NOBJECT_BUCKETS;
1842                 if (dev->obj_bucket[dev->bucket_finder].count < lowest) {
1843                         lowest = dev->obj_bucket[dev->bucket_finder].count;
1844                         l = dev->bucket_finder;
1845                 }
1846         }
1847
1848         return l;
1849 }
1850
1851 static int yaffs_new_obj_id(struct yaffs_dev *dev)
1852 {
1853         int bucket = yaffs_find_nice_bucket(dev);
1854         int found = 0;
1855         struct list_head *i;
1856         u32 n = (u32) bucket;
1857
1858         /* Now find an object value that has not already been taken
1859          * by scanning the list.
1860          */
1861
1862         while (!found) {
1863                 found = 1;
1864                 n += YAFFS_NOBJECT_BUCKETS;
1865                 if (1 || dev->obj_bucket[bucket].count > 0) {
1866                         list_for_each(i, &dev->obj_bucket[bucket].list) {
1867                                 /* If there is already one in the list */
1868                                 if (i && list_entry(i, struct yaffs_obj,
1869                                                     hash_link)->obj_id == n) {
1870                                         found = 0;
1871                                 }
1872                         }
1873                 }
1874         }
1875         return n;
1876 }
1877
1878 static void yaffs_hash_obj(struct yaffs_obj *in)
1879 {
1880         int bucket = yaffs_hash_fn(in->obj_id);
1881         struct yaffs_dev *dev = in->my_dev;
1882
1883         list_add(&in->hash_link, &dev->obj_bucket[bucket].list);
1884         dev->obj_bucket[bucket].count++;
1885 }
1886
1887 struct yaffs_obj *yaffs_find_by_number(struct yaffs_dev *dev, u32 number)
1888 {
1889         int bucket = yaffs_hash_fn(number);
1890         struct list_head *i;
1891         struct yaffs_obj *in;
1892
1893         list_for_each(i, &dev->obj_bucket[bucket].list) {
1894                 /* Look if it is in the list */
1895                 in = list_entry(i, struct yaffs_obj, hash_link);
1896                 if (in->obj_id == number) {
1897                         /* Don't show if it is defered free */
1898                         if (in->defered_free)
1899                                 return NULL;
1900                         return in;
1901                 }
1902         }
1903
1904         return NULL;
1905 }
1906
/* Create a new in-core object of the given type.
 * number: the object id to use, or < 0 to allocate a fresh one.
 * Returns NULL if the file's top tnode or the object itself could
 * not be allocated.
 */
static struct yaffs_obj *yaffs_new_obj(struct yaffs_dev *dev, int number,
				enum yaffs_obj_type type)
{
	struct yaffs_obj *the_obj = NULL;
	struct yaffs_tnode *tn = NULL;

	if (number < 0)
		number = yaffs_new_obj_id(dev);

	/* Files need a top tnode; grab it first so failure is cheap. */
	if (type == YAFFS_OBJECT_TYPE_FILE) {
		tn = yaffs_get_tnode(dev);
		if (!tn)
			return NULL;
	}

	the_obj = yaffs_alloc_empty_obj(dev);
	if (!the_obj) {
		/* Don't leak the tnode reserved above. */
		if (tn)
			yaffs_free_tnode(dev, tn);
		return NULL;
	}

	the_obj->fake = 0;
	the_obj->rename_allowed = 1;
	the_obj->unlink_allowed = 1;
	the_obj->obj_id = number;
	yaffs_hash_obj(the_obj);
	the_obj->variant_type = type;
	yaffs_load_current_time(the_obj, 1, 1);

	/* Type-specific initialisation of the variant union. */
	switch (type) {
	case YAFFS_OBJECT_TYPE_FILE:
		the_obj->variant.file_variant.file_size = 0;
		the_obj->variant.file_variant.scanned_size = 0;
		the_obj->variant.file_variant.shrink_size =
						yaffs_max_file_size(dev);
		the_obj->variant.file_variant.top_level = 0;
		the_obj->variant.file_variant.top = tn;
		break;
	case YAFFS_OBJECT_TYPE_DIRECTORY:
		INIT_LIST_HEAD(&the_obj->variant.dir_variant.children);
		INIT_LIST_HEAD(&the_obj->variant.dir_variant.dirty);
		break;
	case YAFFS_OBJECT_TYPE_SYMLINK:
	case YAFFS_OBJECT_TYPE_HARDLINK:
	case YAFFS_OBJECT_TYPE_SPECIAL:
		/* No action required */
		break;
	case YAFFS_OBJECT_TYPE_UNKNOWN:
		/* todo this should not happen */
		break;
	}
	return the_obj;
}
1961
1962 static struct yaffs_obj *yaffs_create_fake_dir(struct yaffs_dev *dev,
1963                                                int number, u32 mode)
1964 {
1965
1966         struct yaffs_obj *obj =
1967             yaffs_new_obj(dev, number, YAFFS_OBJECT_TYPE_DIRECTORY);
1968
1969         if (!obj)
1970                 return NULL;
1971
1972         obj->fake = 1;  /* it is fake so it might not use NAND */
1973         obj->rename_allowed = 0;
1974         obj->unlink_allowed = 0;
1975         obj->deleted = 0;
1976         obj->unlinked = 0;
1977         obj->yst_mode = mode;
1978         obj->my_dev = dev;
1979         obj->hdr_chunk = 0;     /* Not a valid chunk. */
1980         return obj;
1981
1982 }
1983
1984
1985 static void yaffs_init_tnodes_and_objs(struct yaffs_dev *dev)
1986 {
1987         int i;
1988
1989         dev->n_obj = 0;
1990         dev->n_tnodes = 0;
1991         yaffs_init_raw_tnodes_and_objs(dev);
1992
1993         for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) {
1994                 INIT_LIST_HEAD(&dev->obj_bucket[i].list);
1995                 dev->obj_bucket[i].count = 0;
1996         }
1997 }
1998
1999 struct yaffs_obj *yaffs_find_or_create_by_number(struct yaffs_dev *dev,
2000                                                  int number,
2001                                                  enum yaffs_obj_type type)
2002 {
2003         struct yaffs_obj *the_obj = NULL;
2004
2005         if (number > 0)
2006                 the_obj = yaffs_find_by_number(dev, number);
2007
2008         if (!the_obj)
2009                 the_obj = yaffs_new_obj(dev, number, type);
2010
2011         return the_obj;
2012
2013 }
2014
2015 YCHAR *yaffs_clone_str(const YCHAR *str)
2016 {
2017         YCHAR *new_str = NULL;
2018         int len;
2019
2020         if (!str)
2021                 str = _Y("");
2022
2023         len = strnlen(str, YAFFS_MAX_ALIAS_LENGTH);
2024         new_str = kmalloc((len + 1) * sizeof(YCHAR), GFP_NOFS);
2025         if (new_str) {
2026                 strncpy(new_str, str, len);
2027                 new_str[len] = 0;
2028         }
2029         return new_str;
2030
2031 }
/*
 * yaffs_update_parent() handles fixing a directory's mtime and ctime when a
 * new link (ie. name) is created or deleted in the directory.
 *
 * ie.
 *   create dir/a : update dir's mtime/ctime
 *   rm dir/a:   update dir's mtime/ctime
 *   modify dir/a: don't update dir's mtime/ctime
 *
 * This can be handled immediately or deferred. Deferring helps reduce the
 * number of updates when many files in a directory are changed within a
 * brief period.
 *
 * If the directory updating is deferred then yaffs_update_dirty_dirs must be
 * called periodically.
 */
2047
2048 static void yaffs_update_parent(struct yaffs_obj *obj)
2049 {
2050         struct yaffs_dev *dev;
2051
2052         if (!obj)
2053                 return;
2054         dev = obj->my_dev;
2055         obj->dirty = 1;
2056         yaffs_load_current_time(obj, 0, 1);
2057         if (dev->param.defered_dir_update) {
2058                 struct list_head *link = &obj->variant.dir_variant.dirty;
2059
2060                 if (list_empty(link)) {
2061                         list_add(link, &dev->dirty_dirs);
2062                         yaffs_trace(YAFFS_TRACE_BACKGROUND,
2063                           "Added object %d to dirty directories",
2064                            obj->obj_id);
2065                 }
2066
2067         } else {
2068                 yaffs_update_oh(obj, NULL, 0, 0, 0, NULL);
2069         }
2070 }
2071
/* Flush all deferred directory updates: write the object header of
 * every directory queued on dev->dirty_dirs that is still dirty.
 */
void yaffs_update_dirty_dirs(struct yaffs_dev *dev)
{
	struct list_head *link;
	struct yaffs_obj *obj;
	struct yaffs_dir_var *d_s;
	union yaffs_obj_var *o_v;

	yaffs_trace(YAFFS_TRACE_BACKGROUND, "Update dirty directories");

	while (!list_empty(&dev->dirty_dirs)) {
		link = dev->dirty_dirs.next;
		/* Re-init (not just del) so list_empty() works in
		 * yaffs_update_parent() when the dir gets dirty again.
		 */
		list_del_init(link);

		/* Recover the enclosing object from the list link:
		 * link -> dir variant -> variant union -> object.
		 */
		d_s = list_entry(link, struct yaffs_dir_var, dirty);
		o_v = list_entry(d_s, union yaffs_obj_var, dir_variant);
		obj = list_entry(o_v, struct yaffs_obj, variant);

		yaffs_trace(YAFFS_TRACE_BACKGROUND, "Update directory %d",
			obj->obj_id);

		if (obj->dirty)
			yaffs_update_oh(obj, NULL, 0, 0, 0, NULL);
	}
}
2096
2097 /*
2098  * Mknod (create) a new object.
2099  * equiv_obj only has meaning for a hard link;
2100  * alias_str only has meaning for a symlink.
2101  * rdev only has meaning for devices (a subset of special objects)
2102  */
2103
static struct yaffs_obj *yaffs_create_obj(enum yaffs_obj_type type,
					  struct yaffs_obj *parent,
					  const YCHAR *name,
					  u32 mode,
					  u32 uid,
					  u32 gid,
					  struct yaffs_obj *equiv_obj,
					  const YCHAR *alias_str, u32 rdev)
{
	struct yaffs_obj *in;
	YCHAR *str = NULL;
	struct yaffs_dev *dev = parent->my_dev;

	/* Check if the entry exists.
	 * If it does then fail the call since we don't want a dup. */
	if (yaffs_find_by_name(parent, name))
		return NULL;

	/* Clone the alias up front so a failed clone aborts cheaply. */
	if (type == YAFFS_OBJECT_TYPE_SYMLINK) {
		str = yaffs_clone_str(alias_str);
		if (!str)
			return NULL;
	}

	in = yaffs_new_obj(dev, -1, type);

	if (!in) {
		/* kfree(NULL) is a no-op for non-symlink types. */
		kfree(str);
		return NULL;
	}

	in->hdr_chunk = 0;
	in->valid = 1;
	in->variant_type = type;

	in->yst_mode = mode;

	yaffs_attribs_init(in, gid, uid, rdev);

	in->n_data_chunks = 0;

	yaffs_set_obj_name(in, name);
	in->dirty = 1;

	yaffs_add_obj_to_dir(parent, in);

	in->my_dev = parent->my_dev;

	/* Type-specific wiring of the variant union. */
	switch (type) {
	case YAFFS_OBJECT_TYPE_SYMLINK:
		/* The symlink object takes ownership of the cloned alias. */
		in->variant.symlink_variant.alias = str;
		break;
	case YAFFS_OBJECT_TYPE_HARDLINK:
		/* Join the equivalent object's hard-link ring. */
		in->variant.hardlink_variant.equiv_obj = equiv_obj;
		in->variant.hardlink_variant.equiv_id = equiv_obj->obj_id;
		list_add(&in->hard_links, &equiv_obj->hard_links);
		break;
	case YAFFS_OBJECT_TYPE_FILE:
	case YAFFS_OBJECT_TYPE_DIRECTORY:
	case YAFFS_OBJECT_TYPE_SPECIAL:
	case YAFFS_OBJECT_TYPE_UNKNOWN:
		/* do nothing */
		break;
	}

	/* The object only really exists once its header is on NAND. */
	if (yaffs_update_oh(in, name, 0, 0, 0, NULL) < 0) {
		/* Could not create the object header, fail */
		yaffs_del_obj(in);
		in = NULL;
	}

	/* Success also dirties the parent (a new name appeared in it). */
	if (in)
		yaffs_update_parent(parent);

	return in;
}
2180
2181 struct yaffs_obj *yaffs_create_file(struct yaffs_obj *parent,
2182                                     const YCHAR *name, u32 mode, u32 uid,
2183                                     u32 gid)
2184 {
2185         return yaffs_create_obj(YAFFS_OBJECT_TYPE_FILE, parent, name, mode,
2186                                 uid, gid, NULL, NULL, 0);
2187 }
2188
2189 struct yaffs_obj *yaffs_create_dir(struct yaffs_obj *parent, const YCHAR *name,
2190                                    u32 mode, u32 uid, u32 gid)
2191 {
2192         return yaffs_create_obj(YAFFS_OBJECT_TYPE_DIRECTORY, parent, name,
2193                                 mode, uid, gid, NULL, NULL, 0);
2194 }
2195
2196 struct yaffs_obj *yaffs_create_special(struct yaffs_obj *parent,
2197                                        const YCHAR *name, u32 mode, u32 uid,
2198                                        u32 gid, u32 rdev)
2199 {
2200         return yaffs_create_obj(YAFFS_OBJECT_TYPE_SPECIAL, parent, name, mode,
2201                                 uid, gid, NULL, NULL, rdev);
2202 }
2203
2204 struct yaffs_obj *yaffs_create_symlink(struct yaffs_obj *parent,
2205                                        const YCHAR *name, u32 mode, u32 uid,
2206                                        u32 gid, const YCHAR *alias)
2207 {
2208         return yaffs_create_obj(YAFFS_OBJECT_TYPE_SYMLINK, parent, name, mode,
2209                                 uid, gid, NULL, alias, 0);
2210 }
2211
2212 /* yaffs_link_obj returns the object id of the equivalent object.*/
2213 struct yaffs_obj *yaffs_link_obj(struct yaffs_obj *parent, const YCHAR * name,
2214                                  struct yaffs_obj *equiv_obj)
2215 {
2216         /* Get the real object in case we were fed a hard link obj */
2217         equiv_obj = yaffs_get_equivalent_obj(equiv_obj);
2218
2219         if (yaffs_create_obj(YAFFS_OBJECT_TYPE_HARDLINK,
2220                         parent, name, 0, 0, 0,
2221                         equiv_obj, NULL, 0))
2222                 return equiv_obj;
2223
2224         return NULL;
2225
2226 }
2227
2228
2229
2230 /*---------------------- Block Management and Page Allocation -------------*/
2231
/* Free the block-info array and chunk bitmap, releasing each with the
 * allocator that was actually used: vfree when the *_alt flag marks a
 * vmalloc fallback, kfree otherwise (kfree(NULL) is a safe no-op).
 */
static void yaffs_deinit_blocks(struct yaffs_dev *dev)
{
	if (dev->block_info_alt && dev->block_info)
		vfree(dev->block_info);
	else
		kfree(dev->block_info);

	dev->block_info_alt = 0;

	dev->block_info = NULL;

	if (dev->chunk_bits_alt && dev->chunk_bits)
		vfree(dev->chunk_bits);
	else
		kfree(dev->chunk_bits);
	dev->chunk_bits_alt = 0;
	dev->chunk_bits = NULL;
}
2250
/* Allocate and zero the per-block info array and the chunk-in-use
 * bitmaps.  Returns YAFFS_OK, or YAFFS_FAIL with everything freed.
 */
static int yaffs_init_blocks(struct yaffs_dev *dev)
{
	int n_blocks = dev->internal_end_block - dev->internal_start_block + 1;

	dev->block_info = NULL;
	dev->chunk_bits = NULL;
	dev->alloc_block = -1;	/* force it to get a new one */

	/* If the first allocation strategy fails, try the alternate one */
	dev->block_info =
		kmalloc(n_blocks * sizeof(struct yaffs_block_info), GFP_NOFS);
	if (!dev->block_info) {
		dev->block_info =
		    vmalloc(n_blocks * sizeof(struct yaffs_block_info));
		dev->block_info_alt = 1;	/* vmalloc'd: vfree later */
	} else {
		dev->block_info_alt = 0;
	}

	if (!dev->block_info)
		goto alloc_error;

	/* Set up dynamic blockinfo stuff. Round up bytes. */
	dev->chunk_bit_stride = (dev->param.chunks_per_block + 7) / 8;
	dev->chunk_bits =
		kmalloc(dev->chunk_bit_stride * n_blocks, GFP_NOFS);
	if (!dev->chunk_bits) {
		dev->chunk_bits =
		    vmalloc(dev->chunk_bit_stride * n_blocks);
		dev->chunk_bits_alt = 1;	/* vmalloc'd: vfree later */
	} else {
		dev->chunk_bits_alt = 0;
	}
	if (!dev->chunk_bits)
		goto alloc_error;


	memset(dev->block_info, 0, n_blocks * sizeof(struct yaffs_block_info));
	memset(dev->chunk_bits, 0, dev->chunk_bit_stride * n_blocks);
	return YAFFS_OK;

alloc_error:
	/* Safe even when only one of the two allocations succeeded. */
	yaffs_deinit_blocks(dev);
	return YAFFS_FAIL;
}
2296
2297
/* Handle a block whose last in-use chunk has gone: erase it and mark
 * it EMPTY if it is healthy, or retire it if it needs retiring or the
 * erase fails.
 */
void yaffs_block_became_dirty(struct yaffs_dev *dev, int block_no)
{
	struct yaffs_block_info *bi = yaffs_get_block_info(dev, block_no);
	int erased_ok = 0;
	int i;

	/* If the block is still healthy erase it and mark as clean.
	 * If the block has had a data failure, then retire it.
	 */

	yaffs_trace(YAFFS_TRACE_GC | YAFFS_TRACE_ERASE,
		"yaffs_block_became_dirty block %d state %d %s",
		block_no, bi->block_state,
		(bi->needs_retiring) ? "needs retiring" : "");

	yaffs2_clear_oldest_dirty_seq(dev, bi);

	bi->block_state = YAFFS_BLOCK_STATE_DIRTY;

	/* If this is the block being garbage collected then stop gc'ing */
	if (block_no == dev->gc_block)
		dev->gc_block = 0;

	/* If this block is currently the best candidate for gc
	 * then drop as a candidate */
	if (block_no == dev->gc_dirtiest) {
		dev->gc_dirtiest = 0;
		dev->gc_pages_in_use = 0;
	}

	if (!bi->needs_retiring) {
		/* Any stored checkpoint becomes stale once we erase. */
		yaffs2_checkpt_invalidate(dev);
		erased_ok = yaffs_erase_block(dev, block_no);
		if (!erased_ok) {
			dev->n_erase_failures++;
			yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
			  "**>> Erasure failed %d", block_no);
		}
	}

	/* Verify erasure if needed */
	if (erased_ok &&
	    ((yaffs_trace_mask & YAFFS_TRACE_ERASE) ||
	     !yaffs_skip_verification(dev))) {
		for (i = 0; i < dev->param.chunks_per_block; i++) {
			if (!yaffs_check_chunk_erased(dev,
				block_no * dev->param.chunks_per_block + i)) {
				yaffs_trace(YAFFS_TRACE_ERROR,
					">>Block %d erasure supposedly OK, but chunk %d not erased",
					block_no, i);
			}
		}
	}

	if (!erased_ok) {
		/* We lost a block of free space */
		dev->n_free_chunks -= dev->param.chunks_per_block;
		yaffs_retire_block(dev, block_no);
		yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
			"**>> Block %d retired", block_no);
		return;
	}

	/* Clean it up... */
	bi->block_state = YAFFS_BLOCK_STATE_EMPTY;
	bi->seq_number = 0;
	dev->n_erased_blocks++;
	bi->pages_in_use = 0;
	bi->soft_del_pages = 0;
	bi->has_shrink_hdr = 0;
	bi->skip_erased_check = 1;	/* Clean, so no need to check */
	bi->gc_prioritise = 0;
	bi->has_summary = 0;

	yaffs_clear_chunk_bits(dev, block_no);

	yaffs_trace(YAFFS_TRACE_ERASE, "Erased block %d", block_no);
}
2376
/* Garbage-collect one in-use chunk: read it and its tags back, decide
 * whether it still matters, rewrite it to a fresh location if so, and
 * finally delete the old copy.  Returns YAFFS_FAIL only when writing
 * the replacement chunk failed.
 */
static inline int yaffs_gc_process_chunk(struct yaffs_dev *dev,
					struct yaffs_block_info *bi,
					int old_chunk, u8 *buffer)
{
	int new_chunk;
	int mark_flash = 1;	/* whether deletion must mark NAND */
	struct yaffs_ext_tags tags;
	struct yaffs_obj *object;
	int matching_chunk;
	int ret_val = YAFFS_OK;

	memset(&tags, 0, sizeof(tags));
	yaffs_rd_chunk_tags_nand(dev, old_chunk,
				 buffer, &tags);
	object = yaffs_find_by_number(dev, tags.obj_id);

	yaffs_trace(YAFFS_TRACE_GC_DETAIL,
		"Collecting chunk in block %d, %d %d %d ",
		dev->gc_chunk, tags.obj_id,
		tags.chunk_id, tags.n_bytes);

	/* Cross-check: the object's tnode tree should agree this chunk
	 * lives where the tags claim it does.
	 */
	if (object && !yaffs_skip_verification(dev)) {
		if (tags.chunk_id == 0)
			matching_chunk =
			    object->hdr_chunk;
		else if (object->soft_del)
			/* Defeat the test */
			matching_chunk = old_chunk;
		else
			matching_chunk =
			    yaffs_find_chunk_in_file
			    (object, tags.chunk_id,
			     NULL);

		if (old_chunk != matching_chunk)
			yaffs_trace(YAFFS_TRACE_ERROR,
				"gc: page in gc mismatch: %d %d %d %d",
				old_chunk,
				matching_chunk,
				tags.obj_id,
				tags.chunk_id);
	}

	if (!object) {
		yaffs_trace(YAFFS_TRACE_ERROR,
			"page %d in gc has no object: %d %d %d ",
			old_chunk,
			tags.obj_id, tags.chunk_id,
			tags.n_bytes);
	}

	if (object &&
	    object->deleted &&
	    object->soft_del && tags.chunk_id != 0) {
		/* Data chunk in a soft deleted file,
		 * throw it away.
		 * It's a soft deleted data chunk,
		 * No need to copy this, just forget
		 * about it and fix up the object.
		 */

		/* Free chunks already includes
		 * softdeleted chunks, however this
		 * chunk is going to soon be really
		 * deleted which will increment free
		 * chunks. We have to decrement free
		 * chunks so this works out properly.
		 */
		dev->n_free_chunks--;
		bi->soft_del_pages--;

		object->n_data_chunks--;
		if (object->n_data_chunks <= 0) {
			/* remember to clean up obj */
			dev->gc_cleanup_list[dev->n_clean_ups] = tags.obj_id;
			dev->n_clean_ups++;
		}
		mark_flash = 0;
	} else if (object) {
		/* It's either a data chunk in a live
		 * file or an ObjectHeader, so we're
		 * interested in it.
		 * NB Need to keep the ObjectHeaders of
		 * deleted files until the whole file
		 * has been deleted off
		 */
		tags.serial_number++;
		dev->n_gc_copies++;

		if (tags.chunk_id == 0) {
			/* It is an object Id,
			 * We need to nuke the
			 * shrinkheader flags since its
			 * work is done.
			 * Also need to clean up
			 * shadowing.
			 */
			struct yaffs_obj_hdr *oh;
			oh = (struct yaffs_obj_hdr *) buffer;

			oh->is_shrink = 0;
			tags.extra_is_shrink = 0;
			oh->shadows_obj = 0;
			oh->inband_shadowed_obj_id = 0;
			tags.extra_shadows = 0;

			/* Update file size */
			if (object->variant_type == YAFFS_OBJECT_TYPE_FILE) {
				yaffs_oh_size_load(oh,
				    object->variant.file_variant.file_size);
				tags.extra_file_size =
				    object->variant.file_variant.file_size;
			}

			yaffs_verify_oh(object, oh, &tags, 1);
			new_chunk =
			    yaffs_write_new_chunk(dev, (u8 *) oh, &tags, 1);
		} else {
			new_chunk =
			    yaffs_write_new_chunk(dev, buffer, &tags, 1);
		}

		if (new_chunk < 0) {
			ret_val = YAFFS_FAIL;
		} else {

			/* Now fix up the Tnodes etc. */

			if (tags.chunk_id == 0) {
				/* It's a header */
				object->hdr_chunk = new_chunk;
				object->serial = tags.serial_number;
			} else {
				/* It's a data chunk */
				yaffs_put_chunk_in_file(object, tags.chunk_id,
							new_chunk, 0);
			}
		}
	}
	/* Only delete the old copy if nothing above failed. */
	if (ret_val == YAFFS_OK)
		yaffs_chunk_del(dev, old_chunk, mark_flash, __LINE__);
	return ret_val;
}
2520
/*
 * Garbage collect one block: copy any chunks still in use off the block
 * so that it can be erased and reused.
 *
 * whole_block: if true, keep going until every chunk in the block has
 * been processed; otherwise copy at most a few chunks per call so that
 * passive gc spreads the cost over time (progress is kept in
 * dev->gc_chunk between calls).
 *
 * Returns YAFFS_OK, or the failure result from copying a live chunk.
 */
static int yaffs_gc_block(struct yaffs_dev *dev, int block, int whole_block)
{
	int old_chunk;
	int ret_val = YAFFS_OK;
	int i;
	int is_checkpt_block;
	int max_copies;
	int chunks_before = yaffs_get_erased_chunks(dev);
	int chunks_after;
	struct yaffs_block_info *bi = yaffs_get_block_info(dev, block);

	is_checkpt_block = (bi->block_state == YAFFS_BLOCK_STATE_CHECKPOINT);

	yaffs_trace(YAFFS_TRACE_TRACING,
		"Collecting block %d, in use %d, shrink %d, whole_block %d",
		block, bi->pages_in_use, bi->has_shrink_hdr,
		whole_block);

	/*yaffs_verify_free_chunks(dev); */

	if (bi->block_state == YAFFS_BLOCK_STATE_FULL)
		bi->block_state = YAFFS_BLOCK_STATE_COLLECTING;

	bi->has_shrink_hdr = 0;	/* clear the flag so that the block can erase */

	/* Copying chunks below triggers writes; disable gc so those writes
	 * cannot recursively start another collection. */
	dev->gc_disable = 1;

	yaffs_summary_gc(dev, block);

	if (is_checkpt_block || !yaffs_still_some_chunks(dev, block)) {
		/* Nothing needs copying; the block goes straight to dirty. */
		yaffs_trace(YAFFS_TRACE_TRACING,
			"Collecting block %d that has no chunks in use",
			block);
		yaffs_block_became_dirty(dev, block);
	} else {

		u8 *buffer = yaffs_get_temp_buffer(dev);

		yaffs_verify_blk(dev, bi, block);

		/* Passive gc copies at most 5 chunks per call. */
		max_copies = (whole_block) ? dev->param.chunks_per_block : 5;
		old_chunk = block * dev->param.chunks_per_block + dev->gc_chunk;

		for (/* init already done */ ;
		     ret_val == YAFFS_OK &&
		     dev->gc_chunk < dev->param.chunks_per_block &&
		     (bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) &&
		     max_copies > 0;
		     dev->gc_chunk++, old_chunk++) {
			if (yaffs_check_chunk_bit(dev, block, dev->gc_chunk)) {
				/* Page is in use and might need to be copied */
				max_copies--;
				ret_val = yaffs_gc_process_chunk(dev, bi,
							old_chunk, buffer);
			}
		}
		yaffs_release_temp_buffer(dev, buffer);
	}

	yaffs_verify_collected_blk(dev, bi, block);

	if (bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) {
		/*
		 * The gc did not complete. Set block state back to FULL
		 * because checkpointing does not restore gc.
		 */
		bi->block_state = YAFFS_BLOCK_STATE_FULL;
	} else {
		/* The gc completed. */
		/* Do any required cleanups: delete the objects queued on
		 * gc_cleanup_list (presumably files whose remaining chunks
		 * were discarded during this collection -- see
		 * yaffs_gc_process_chunk; confirm there). */
		for (i = 0; i < dev->n_clean_ups; i++) {
			/* Time to delete the file too */
			struct yaffs_obj *object =
			    yaffs_find_by_number(dev, dev->gc_cleanup_list[i]);
			if (object) {
				yaffs_free_tnode(dev,
					  object->variant.file_variant.top);
				object->variant.file_variant.top = NULL;
				yaffs_trace(YAFFS_TRACE_GC,
					"yaffs: About to finally delete object %d",
					object->obj_id);
				yaffs_generic_obj_del(object);
				object->my_dev->n_deleted_files--;
			}

		}
		chunks_after = yaffs_get_erased_chunks(dev);
		if (chunks_before >= chunks_after)
			yaffs_trace(YAFFS_TRACE_GC,
				"gc did not increase free chunks before %d after %d",
				chunks_before, chunks_after);
		/* Reset gc state ready for the next collection. */
		dev->gc_block = 0;
		dev->gc_chunk = 0;
		dev->n_clean_ups = 0;
	}

	dev->gc_disable = 0;

	return ret_val;
}
2621
2622 /*
2623  * find_gc_block() selects the dirtiest block (or close enough)
2624  * for garbage collection.
2625  */
2626
/*
 * Select a block for garbage collection.
 *
 * Selection order:
 *  1. A block flagged gc_prioritise (unless collecting aggressively),
 *     falling back to the oldest dirty block if a prioritised block
 *     exists but none is collectable.
 *  2. A bounded circular scan for the dirtiest collectable block; scan
 *     length and acceptable dirtiness depend on aggressive/background.
 *  3. If nothing has been chosen for many calls, the oldest dirty block.
 *
 * Returns the selected block number, or 0 if none was selected.
 */
static unsigned yaffs_find_gc_block(struct yaffs_dev *dev,
				    int aggressive, int background)
{
	int i;
	int iterations;
	unsigned selected = 0;
	int prioritised = 0;
	int prioritised_exist = 0;
	struct yaffs_block_info *bi;
	int threshold;

	/* First let's see if we need to grab a prioritised block */
	if (dev->has_pending_prioritised_gc && !aggressive) {
		dev->gc_dirtiest = 0;
		bi = dev->block_info;
		for (i = dev->internal_start_block;
		     i <= dev->internal_end_block && !selected; i++) {

			if (bi->gc_prioritise) {
				prioritised_exist = 1;
				if (bi->block_state == YAFFS_BLOCK_STATE_FULL &&
				    yaffs_block_ok_for_gc(dev, bi)) {
					selected = i;
					prioritised = 1;
				}
			}
			bi++;
		}

		/*
		 * If there is a prioritised block and none was selected then
		 * this happened because there is at least one old dirty block
		 * gumming up the works. Let's gc the oldest dirty block.
		 */

		if (prioritised_exist &&
		    !selected && dev->oldest_dirty_block > 0)
			selected = dev->oldest_dirty_block;

		if (!prioritised_exist) /* None found, so we can clear this */
			dev->has_pending_prioritised_gc = 0;
	}

	/* If we're doing aggressive GC then we are happy to take a less-dirty
	 * block, and search harder.
	 * else (leisurely gc), then we only bother to do this if the
	 * block has only a few pages in use.
	 */

	if (!selected) {
		int pages_used;
		int n_blocks =
		    dev->internal_end_block - dev->internal_start_block + 1;
		if (aggressive) {
			/* Aggressive: accept any dirtiness, scan everything. */
			threshold = dev->param.chunks_per_block;
			iterations = n_blocks;
		} else {
			int max_threshold;

			/* Background gc tolerates dirtier blocks than
			 * foreground passive gc. */
			if (background)
				max_threshold = dev->param.chunks_per_block / 2;
			else
				max_threshold = dev->param.chunks_per_block / 8;

			if (max_threshold < YAFFS_GC_PASSIVE_THRESHOLD)
				max_threshold = YAFFS_GC_PASSIVE_THRESHOLD;

			/* Relax the threshold the longer selection keeps
			 * failing (gc_not_done counts failed attempts). */
			threshold = background ? (dev->gc_not_done + 2) * 2 : 0;
			if (threshold < YAFFS_GC_PASSIVE_THRESHOLD)
				threshold = YAFFS_GC_PASSIVE_THRESHOLD;
			if (threshold > max_threshold)
				threshold = max_threshold;

			iterations = n_blocks / 16 + 1;
			if (iterations > 100)
				iterations = 100;
		}

		/* Resume the circular scan at gc_block_finder, remembering
		 * the dirtiest collectable block seen so far, and stop early
		 * once the candidate is good enough. */
		for (i = 0;
		     i < iterations &&
		     (dev->gc_dirtiest < 1 ||
		      dev->gc_pages_in_use > YAFFS_GC_GOOD_ENOUGH);
		     i++) {
			dev->gc_block_finder++;
			if (dev->gc_block_finder < dev->internal_start_block ||
			    dev->gc_block_finder > dev->internal_end_block)
				dev->gc_block_finder =
				    dev->internal_start_block;

			bi = yaffs_get_block_info(dev, dev->gc_block_finder);

			/* Soft-deleted pages will not need copying. */
			pages_used = bi->pages_in_use - bi->soft_del_pages;

			if (bi->block_state == YAFFS_BLOCK_STATE_FULL &&
			    pages_used < dev->param.chunks_per_block &&
			    (dev->gc_dirtiest < 1 ||
			     pages_used < dev->gc_pages_in_use) &&
			    yaffs_block_ok_for_gc(dev, bi)) {
				dev->gc_dirtiest = dev->gc_block_finder;
				dev->gc_pages_in_use = pages_used;
			}
		}

		if (dev->gc_dirtiest > 0 && dev->gc_pages_in_use <= threshold)
			selected = dev->gc_dirtiest;
	}

	/*
	 * If nothing has been selected for a while, try the oldest dirty
	 * because that's gumming up the works.
	 */

	if (!selected && dev->param.is_yaffs2 &&
	    dev->gc_not_done >= (background ? 10 : 20)) {
		yaffs2_find_oldest_dirty_seq(dev);
		if (dev->oldest_dirty_block > 0) {
			selected = dev->oldest_dirty_block;
			dev->gc_dirtiest = selected;
			dev->oldest_dirty_gc_count++;
			bi = yaffs_get_block_info(dev, selected);
			dev->gc_pages_in_use =
			    bi->pages_in_use - bi->soft_del_pages;
		} else {
			dev->gc_not_done = 0;
		}
	}

	if (selected) {
		yaffs_trace(YAFFS_TRACE_GC,
			"GC Selected block %d with %d free, prioritised:%d",
			selected,
			dev->param.chunks_per_block - dev->gc_pages_in_use,
			prioritised);

		dev->n_gc_blocks++;
		if (background)
			dev->bg_gcs++;

		/* Reset the search state for the next selection. */
		dev->gc_dirtiest = 0;
		dev->gc_pages_in_use = 0;
		dev->gc_not_done = 0;
		if (dev->refresh_skip > 0)
			dev->refresh_skip--;
	} else {
		dev->gc_not_done++;
		yaffs_trace(YAFFS_TRACE_GC,
			"GC none: finder %d skip %d threshold %d dirtiest %d using %d oldest %d%s",
			dev->gc_block_finder, dev->gc_not_done, threshold,
			dev->gc_dirtiest, dev->gc_pages_in_use,
			dev->oldest_dirty_block, background ? " bg" : "");
	}

	return selected;
}
2781
2782 /* New garbage collector
2783  * If we're very low on erased blocks then we do aggressive garbage collection
 * otherwise we do "leisurely" garbage collection.
2785  * Aggressive gc looks further (whole array) and will accept less dirty blocks.
2786  * Passive gc only inspects smaller areas and only accepts more dirty blocks.
2787  *
2788  * The idea is to help clear out space in a more spread-out manner.
2789  * Dunno if it really does anything useful.
2790  */
/*
 * background: non-zero when called from the background gc thread.
 * Returns the gc result only when the collection was aggressive
 * (i.e. urgently needed); passive gc always reports YAFFS_OK.
 */
static int yaffs_check_gc(struct yaffs_dev *dev, int background)
{
	int aggressive = 0;
	int gc_ok = YAFFS_OK;
	int max_tries = 0;
	int min_erased;
	int erased_chunks;
	int checkpt_block_adjust;

	/* Let a user-supplied gc_control hook veto collection. */
	if (dev->param.gc_control && (dev->param.gc_control(dev) & 1) == 0)
		return YAFFS_OK;

	if (dev->gc_disable)
		/* Bail out so we don't get recursive gc */
		return YAFFS_OK;

	/* This loop should pass the first time.
	 * Only loops here if the collection does not increase space.
	 */

	do {
		max_tries++;

		checkpt_block_adjust = yaffs_calc_checkpt_blocks_required(dev);

		/* Erased blocks needed for the reserve plus checkpoint. */
		min_erased =
		    dev->param.n_reserved_blocks + checkpt_block_adjust + 1;
		erased_chunks =
		    dev->n_erased_blocks * dev->param.chunks_per_block;

		/* If we need a block soon then do aggressive gc. */
		if (dev->n_erased_blocks < min_erased)
			aggressive = 1;
		else {
			/* Foreground callers skip gc entirely when over a
			 * quarter of the free chunks are already erased. */
			if (!background
			    && erased_chunks > (dev->n_free_chunks / 4))
				break;

			if (dev->gc_skip > 20)
				dev->gc_skip = 20;
			if (erased_chunks < dev->n_free_chunks / 2 ||
			    dev->gc_skip < 1 || background)
				aggressive = 0;
			else {
				/* Space is comfortable: burn down gc_skip
				 * to space out passive collections. */
				dev->gc_skip--;
				break;
			}
		}

		dev->gc_skip = 5;

		/* If we don't already have a block being gc'd then see if we
		 * should start another */

		if (dev->gc_block < 1 && !aggressive) {
			/* Passive gc may pick a block that is due a refresh. */
			dev->gc_block = yaffs2_find_refresh_block(dev);
			dev->gc_chunk = 0;
			dev->n_clean_ups = 0;
		}
		if (dev->gc_block < 1) {
			dev->gc_block =
			    yaffs_find_gc_block(dev, aggressive, background);
			dev->gc_chunk = 0;
			dev->n_clean_ups = 0;
		}

		if (dev->gc_block > 0) {
			dev->all_gcs++;
			if (!aggressive)
				dev->passive_gc_count++;

			yaffs_trace(YAFFS_TRACE_GC,
				"yaffs: GC n_erased_blocks %d aggressive %d",
				dev->n_erased_blocks, aggressive);

			gc_ok = yaffs_gc_block(dev, dev->gc_block, aggressive);
		}

		if (dev->n_erased_blocks < (dev->param.n_reserved_blocks) &&
		    dev->gc_block > 0) {
			yaffs_trace(YAFFS_TRACE_GC,
				"yaffs: GC !!!no reclaim!!! n_erased_blocks %d after try %d block %d",
				dev->n_erased_blocks, max_tries,
				dev->gc_block);
		}
	} while ((dev->n_erased_blocks < dev->param.n_reserved_blocks) &&
		 (dev->gc_block > 0) && (max_tries < 2));

	return aggressive ? gc_ok : YAFFS_OK;
}
2881
2882 /*
2883  * yaffs_bg_gc()
2884  * Garbage collects. Intended to be called from a background thread.
2885  * Returns non-zero if at least half the free chunks are erased.
2886  */
2887 int yaffs_bg_gc(struct yaffs_dev *dev, unsigned urgency)
2888 {
2889         int erased_chunks = dev->n_erased_blocks * dev->param.chunks_per_block;
2890
2891         yaffs_trace(YAFFS_TRACE_BACKGROUND, "Background gc %u", urgency);
2892
2893         yaffs_check_gc(dev, 1);
2894         return erased_chunks > dev->n_free_chunks / 2;
2895 }
2896
2897 /*-------------------- Data file manipulation -----------------*/
2898
2899 static int yaffs_rd_data_obj(struct yaffs_obj *in, int inode_chunk, u8 * buffer)
2900 {
2901         int nand_chunk = yaffs_find_chunk_in_file(in, inode_chunk, NULL);
2902
2903         if (nand_chunk >= 0)
2904                 return yaffs_rd_chunk_tags_nand(in->my_dev, nand_chunk,
2905                                                 buffer, NULL);
2906         else {
2907                 yaffs_trace(YAFFS_TRACE_NANDACCESS,
2908                         "Chunk %d not found zero instead",
2909                         nand_chunk);
2910                 /* get sane (zero) data if you read a hole */
2911                 memset(buffer, 0, in->my_dev->data_bytes_per_chunk);
2912                 return 0;
2913         }
2914
2915 }
2916
/*
 * Delete NAND chunk chunk_id and update the owning block's accounting,
 * possibly kicking off an erasure when the whole block becomes dirty.
 *
 * mark_flash: if set (and yaffs1 mode), also write a deletion marker
 *             back to flash.
 * lyn: the caller's __LINE__, used only in deletion tracing.
 */
void yaffs_chunk_del(struct yaffs_dev *dev, int chunk_id, int mark_flash,
		     int lyn)
{
	int block;
	int page;
	struct yaffs_ext_tags tags;
	struct yaffs_block_info *bi;

	if (chunk_id <= 0)
		return;

	dev->n_deletions++;
	block = chunk_id / dev->param.chunks_per_block;
	page = chunk_id % dev->param.chunks_per_block;

	if (!yaffs_check_chunk_bit(dev, block, page))
		yaffs_trace(YAFFS_TRACE_VERIFY,
			"Deleting invalid chunk %d", chunk_id);

	bi = yaffs_get_block_info(dev, block);

	yaffs2_update_oldest_dirty_seq(dev, block, bi);

	yaffs_trace(YAFFS_TRACE_DELETION,
		"line %d delete of chunk %d",
		lyn, chunk_id);

	/* Only yaffs1 marks deletions on flash; collecting blocks are
	 * skipped (presumably because they are about to be erased --
	 * confirm against the gc path). */
	if (!dev->param.is_yaffs2 && mark_flash &&
	    bi->block_state != YAFFS_BLOCK_STATE_COLLECTING) {

		memset(&tags, 0, sizeof(tags));
		tags.is_deleted = 1;
		yaffs_wr_chunk_tags_nand(dev, chunk_id, NULL, &tags);
		yaffs_handle_chunk_update(dev, chunk_id, &tags);
	} else {
		dev->n_unmarked_deletions++;
	}

	/* Pull out of the management area.
	 * If the whole block became dirty, this will kick off an erasure.
	 */
	if (bi->block_state == YAFFS_BLOCK_STATE_ALLOCATING ||
	    bi->block_state == YAFFS_BLOCK_STATE_FULL ||
	    bi->block_state == YAFFS_BLOCK_STATE_NEEDS_SCAN ||
	    bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) {
		dev->n_free_chunks++;
		yaffs_clear_chunk_bit(dev, block, page);
		bi->pages_in_use--;

		if (bi->pages_in_use == 0 &&
		    !bi->has_shrink_hdr &&
		    bi->block_state != YAFFS_BLOCK_STATE_ALLOCATING &&
		    bi->block_state != YAFFS_BLOCK_STATE_NEEDS_SCAN) {
			yaffs_block_became_dirty(dev, block);
		}
	}
}
2974
/*
 * Write one chunk of file data for "in" at file position inode_chunk.
 * Writes a fresh NAND chunk, patches it into the file's tnode tree and
 * then deletes any chunk previously at that position.
 *
 * Returns the new NAND chunk id (> 0) on success, <= 0 on failure.
 */
static int yaffs_wr_data_obj(struct yaffs_obj *in, int inode_chunk,
			     const u8 *buffer, int n_bytes, int use_reserve)
{
	/* Find old chunk Need to do this to get serial number
	 * Write new one and patch into tree.
	 * Invalidate old tags.
	 */

	int prev_chunk_id;
	struct yaffs_ext_tags prev_tags;
	int new_chunk_id;
	struct yaffs_ext_tags new_tags;
	struct yaffs_dev *dev = in->my_dev;

	yaffs_check_gc(dev, 0);

	/* Get the previous chunk at this location in the file if it exists.
	 * If it does not exist then put a zero into the tree. This creates
	 * the tnode now, rather than later when it is harder to clean up.
	 */
	prev_chunk_id = yaffs_find_chunk_in_file(in, inode_chunk, &prev_tags);
	if (prev_chunk_id < 1 &&
	    !yaffs_put_chunk_in_file(in, inode_chunk, 0, 0))
		return 0;

	/* Set up new tags */
	memset(&new_tags, 0, sizeof(new_tags));

	new_tags.chunk_id = inode_chunk;
	new_tags.obj_id = in->obj_id;
	/* The replacement gets the next serial number (presumably so a
	 * scan can tell the newer copy from the old one -- confirm in the
	 * scanning code). */
	new_tags.serial_number =
	    (prev_chunk_id > 0) ? prev_tags.serial_number + 1 : 1;
	new_tags.n_bytes = n_bytes;

	/* A data chunk must carry between 1 byte and a full chunk. */
	if (n_bytes < 1 || n_bytes > dev->param.total_bytes_per_chunk) {
		yaffs_trace(YAFFS_TRACE_ERROR,
		  "Writing %d bytes to chunk!!!!!!!!!",
		   n_bytes);
		BUG();
	}

	new_chunk_id =
	    yaffs_write_new_chunk(dev, buffer, &new_tags, use_reserve);

	if (new_chunk_id > 0) {
		yaffs_put_chunk_in_file(in, inode_chunk, new_chunk_id, 0);

		if (prev_chunk_id > 0)
			yaffs_chunk_del(dev, prev_chunk_id, 1, __LINE__);

		yaffs_verify_file_sane(in);
	}
	return new_chunk_id;

}
3030
3031
3032
3033 static int yaffs_do_xattrib_mod(struct yaffs_obj *obj, int set,
3034                                 const YCHAR *name, const void *value, int size,
3035                                 int flags)
3036 {
3037         struct yaffs_xattr_mod xmod;
3038         int result;
3039
3040         xmod.set = set;
3041         xmod.name = name;
3042         xmod.data = value;
3043         xmod.size = size;
3044         xmod.flags = flags;
3045         xmod.result = -ENOSPC;
3046
3047         result = yaffs_update_oh(obj, NULL, 0, 0, 0, &xmod);
3048
3049         if (result > 0)
3050                 return xmod.result;
3051         else
3052                 return -ENOSPC;
3053 }
3054
3055 static int yaffs_apply_xattrib_mod(struct yaffs_obj *obj, char *buffer,
3056                                    struct yaffs_xattr_mod *xmod)
3057 {
3058         int retval = 0;
3059         int x_offs = sizeof(struct yaffs_obj_hdr);
3060         struct yaffs_dev *dev = obj->my_dev;
3061         int x_size = dev->data_bytes_per_chunk - sizeof(struct yaffs_obj_hdr);
3062         char *x_buffer = buffer + x_offs;
3063
3064         if (xmod->set)
3065                 retval =
3066                     nval_set(x_buffer, x_size, xmod->name, xmod->data,
3067                              xmod->size, xmod->flags);
3068         else
3069                 retval = nval_del(x_buffer, x_size, xmod->name);
3070
3071         obj->has_xattr = nval_hasvalues(x_buffer, x_size);
3072         obj->xattr_known = 1;
3073         xmod->result = retval;
3074
3075         return retval;
3076 }
3077
/*
 * Fetch xattrib data from obj's object header chunk.
 * If name is NULL the available names are listed into value/size;
 * otherwise the named value is fetched.
 * Returns the nval_* result, or a negative errno-style code.
 */
static int yaffs_do_xattrib_fetch(struct yaffs_obj *obj, const YCHAR *name,
				  void *value, int size)
{
	char *buffer = NULL;
	int result;
	struct yaffs_ext_tags tags;
	struct yaffs_dev *dev = obj->my_dev;
	int x_offs = sizeof(struct yaffs_obj_hdr);
	int x_size = dev->data_bytes_per_chunk - sizeof(struct yaffs_obj_hdr);
	char *x_buffer;
	int retval = 0;

	/* No object header on NAND yet means no xattribs. */
	if (obj->hdr_chunk < 1)
		return -ENODATA;

	/* If we know that the object has no xattribs then don't do all the
	 * reading and parsing.
	 */
	if (obj->xattr_known && !obj->has_xattr) {
		if (name)
			return -ENODATA;
		else
			return 0;
	}

	buffer = (char *)yaffs_get_temp_buffer(dev);
	if (!buffer)
		return -ENOMEM;

	result =
	    yaffs_rd_chunk_tags_nand(dev, obj->hdr_chunk, (u8 *) buffer, &tags);

	if (result != YAFFS_OK)
		retval = -ENOENT;
	else {
		/* The xattrib area lives after the header in the chunk. */
		x_buffer = buffer + x_offs;

		/* First look: cache whether the object has any xattribs. */
		if (!obj->xattr_known) {
			obj->has_xattr = nval_hasvalues(x_buffer, x_size);
			obj->xattr_known = 1;
		}

		if (name)
			retval = nval_get(x_buffer, x_size, name, value, size);
		else
			retval = nval_list(x_buffer, x_size, value, size);
	}
	yaffs_release_temp_buffer(dev, (u8 *) buffer);
	return retval;
}
3128
3129 int yaffs_set_xattrib(struct yaffs_obj *obj, const YCHAR * name,
3130                       const void *value, int size, int flags)
3131 {
3132         return yaffs_do_xattrib_mod(obj, 1, name, value, size, flags);
3133 }
3134
3135 int yaffs_remove_xattrib(struct yaffs_obj *obj, const YCHAR * name)
3136 {
3137         return yaffs_do_xattrib_mod(obj, 0, name, NULL, 0, 0);
3138 }
3139
3140 int yaffs_get_xattrib(struct yaffs_obj *obj, const YCHAR * name, void *value,
3141                       int size)
3142 {
3143         return yaffs_do_xattrib_fetch(obj, name, value, size);
3144 }
3145
3146 int yaffs_list_xattrib(struct yaffs_obj *obj, char *buffer, int size)
3147 {
3148         return yaffs_do_xattrib_fetch(obj, NULL, buffer, size);
3149 }
3150
/*
 * Finish loading a lazy-loaded object's details (mode, attributes,
 * name and symlink alias) from its object header on NAND.
 * No-op unless the object is flagged lazy_loaded and has a header chunk.
 */
static void yaffs_check_obj_details_loaded(struct yaffs_obj *in)
{
	u8 *buf;
	struct yaffs_obj_hdr *oh;
	struct yaffs_dev *dev;
	struct yaffs_ext_tags tags;
	int result;	/* NOTE(review): read result is never checked */
	int alloc_failed = 0;	/* NOTE(review): set but never acted on */

	if (!in || !in->lazy_loaded || in->hdr_chunk < 1)
		return;

	dev = in->my_dev;
	in->lazy_loaded = 0;
	buf = yaffs_get_temp_buffer(dev);

	result = yaffs_rd_chunk_tags_nand(dev, in->hdr_chunk, buf, &tags);
	oh = (struct yaffs_obj_hdr *)buf;

	in->yst_mode = oh->yst_mode;
	yaffs_load_attribs(in, oh);
	yaffs_set_obj_name_from_oh(in, oh);

	if (in->variant_type == YAFFS_OBJECT_TYPE_SYMLINK) {
		in->variant.symlink_variant.alias =
		    yaffs_clone_str(oh->alias);
		if (!in->variant.symlink_variant.alias)
			alloc_failed = 1;	/* Not returned */
	}
	yaffs_release_temp_buffer(dev, buf);
}
3182
3183 static void yaffs_load_name_from_oh(struct yaffs_dev *dev, YCHAR *name,
3184                                     const YCHAR *oh_name, int buff_size)
3185 {
3186 #ifdef CONFIG_YAFFS_AUTO_UNICODE
3187         if (dev->param.auto_unicode) {
3188                 if (*oh_name) {
3189                         /* It is an ASCII name, do an ASCII to
3190                          * unicode conversion */
3191                         const char *ascii_oh_name = (const char *)oh_name;
3192                         int n = buff_size - 1;
3193                         while (n > 0 && *ascii_oh_name) {
3194                                 *name = *ascii_oh_name;
3195                                 name++;
3196                                 ascii_oh_name++;
3197                                 n--;
3198                         }
3199                 } else {
3200                         strncpy(name, oh_name + 1, buff_size - 1);
3201                 }
3202         } else {
3203 #else
3204         (void) dev;
3205         {
3206 #endif
3207                 strncpy(name, oh_name, buff_size - 1);
3208         }
3209 }
3210
3211 static void yaffs_load_oh_from_name(struct yaffs_dev *dev, YCHAR *oh_name,
3212                                     const YCHAR *name)
3213 {
3214 #ifdef CONFIG_YAFFS_AUTO_UNICODE
3215
3216         int is_ascii;
3217         YCHAR *w;
3218
3219         if (dev->param.auto_unicode) {
3220
3221                 is_ascii = 1;
3222                 w = name;
3223
3224                 /* Figure out if the name will fit in ascii character set */
3225                 while (is_ascii && *w) {
3226                         if ((*w) & 0xff00)
3227                                 is_ascii = 0;
3228                         w++;
3229                 }
3230
3231                 if (is_ascii) {
3232                         /* It is an ASCII name, so convert unicode to ascii */
3233                         char *ascii_oh_name = (char *)oh_name;
3234                         int n = YAFFS_MAX_NAME_LENGTH - 1;
3235                         while (n > 0 && *name) {
3236                                 *ascii_oh_name = *name;
3237                                 name++;
3238                                 ascii_oh_name++;
3239                                 n--;
3240                         }
3241                 } else {
3242                         /* Unicode name, so save starting at the second YCHAR */
3243                         *oh_name = 0;
3244                         strncpy(oh_name + 1, name, YAFFS_MAX_NAME_LENGTH - 2);
3245                 }
3246         } else {
3247 #else
3248         dev = dev;
3249         {
3250 #endif
3251                 strncpy(oh_name, name, YAFFS_MAX_NAME_LENGTH - 1);
3252         }
3253 }
3254
3255 /* UpdateObjectHeader updates the header on NAND for an object.
3256  * If name is not NULL, then that new name is used.
3257  */
3258 int yaffs_update_oh(struct yaffs_obj *in, const YCHAR *name, int force,
3259                     int is_shrink, int shadows, struct yaffs_xattr_mod *xmod)
3260 {
3261
3262         struct yaffs_block_info *bi;
3263         struct yaffs_dev *dev = in->my_dev;
3264         int prev_chunk_id;
3265         int ret_val = 0;
3266         int result = 0;
3267         int new_chunk_id;
3268         struct yaffs_ext_tags new_tags;
3269         struct yaffs_ext_tags old_tags;
3270         const YCHAR *alias = NULL;
3271         u8 *buffer = NULL;
3272         YCHAR old_name[YAFFS_MAX_NAME_LENGTH + 1];
3273         struct yaffs_obj_hdr *oh = NULL;
3274         loff_t file_size = 0;
3275
3276         strcpy(old_name, _Y("silly old name"));
3277
3278         if (in->fake && in != dev->root_dir && !force && !xmod)
3279                 return ret_val;
3280
3281         yaffs_check_gc(dev, 0);
3282         yaffs_check_obj_details_loaded(in);
3283
3284         buffer = yaffs_get_temp_buffer(in->my_dev);
3285         oh = (struct yaffs_obj_hdr *)buffer;
3286
3287         prev_chunk_id = in->hdr_chunk;
3288
3289         if (prev_chunk_id > 0) {
3290                 result = yaffs_rd_chunk_tags_nand(dev, prev_chunk_id,
3291                                                   buffer, &old_tags);
3292
3293                 yaffs_verify_oh(in, oh, &old_tags, 0);
3294                 memcpy(old_name, oh->name, sizeof(oh->name));
3295                 memset(buffer, 0xff, sizeof(struct yaffs_obj_hdr));
3296         } else {
3297                 memset(buffer, 0xff, dev->data_bytes_per_chunk);
3298         }
3299
3300         oh->type = in->variant_type;
3301         oh->yst_mode = in->yst_mode;
3302         oh->shadows_obj = oh->inband_shadowed_obj_id = shadows;
3303
3304         yaffs_load_attribs_oh(oh, in);
3305
3306         if (in->parent)
3307                 oh->parent_obj_id = in->parent->obj_id;
3308         else
3309                 oh->parent_obj_id = 0;
3310
3311         if (name && *name) {
3312                 memset(oh->name, 0, sizeof(oh->name));
3313                 yaffs_load_oh_from_name(dev, oh->name, name);
3314         } else if (prev_chunk_id > 0) {
3315                 memcpy(oh->name, old_name, sizeof(oh->name));
3316         } else {
3317                 memset(oh->name, 0, sizeof(oh->name));
3318         }
3319
3320         oh->is_shrink = is_shrink;
3321
3322         switch (in->variant_type) {
3323         case YAFFS_OBJECT_TYPE_UNKNOWN:
3324                 /* Should not happen */
3325                 break;
3326         case YAFFS_OBJECT_TYPE_FILE:
3327                 if (oh->parent_obj_id != YAFFS_OBJECTID_DELETED &&
3328                     oh->parent_obj_id != YAFFS_OBJECTID_UNLINKED)
3329                         file_size = in->variant.file_variant.file_size;
3330                 yaffs_oh_size_load(oh, file_size);
3331                 break;
3332         case YAFFS_OBJECT_TYPE_HARDLINK:
3333                 oh->equiv_id = in->variant.hardlink_variant.equiv_id;
3334                 break;
3335         case YAFFS_OBJECT_TYPE_SPECIAL:
3336                 /* Do nothing */
3337                 break;
3338         case YAFFS_OBJECT_TYPE_DIRECTORY:
3339                 /* Do nothing */
3340                 break;
3341         case YAFFS_OBJECT_TYPE_SYMLINK:
3342                 alias = in->variant.symlink_variant.alias;
3343                 if (!alias)
3344                         alias = _Y("no alias");
3345                 strncpy(oh->alias, alias, YAFFS_MAX_ALIAS_LENGTH);
3346                 oh->alias[YAFFS_MAX_ALIAS_LENGTH] = 0;
3347                 break;
3348         }
3349
3350         /* process any xattrib modifications */
3351         if (xmod)
3352                 yaffs_apply_xattrib_mod(in, (char *)buffer, xmod);
3353
3354         /* Tags */
3355         memset(&new_tags, 0, sizeof(new_tags));
3356         in->serial++;
3357         new_tags.chunk_id = 0;
3358         new_tags.obj_id = in->obj_id;
3359         new_tags.serial_number = in->serial;
3360
3361         /* Add extra info for file header */
3362         new_tags.extra_available = 1;
3363         new_tags.extra_parent_id = oh->parent_obj_id;
3364         new_tags.extra_file_size = file_size;
3365         new_tags.extra_is_shrink = oh->is_shrink;
3366         new_tags.extra_equiv_id = oh->equiv_id;
3367         new_tags.extra_shadows = (oh->shadows_obj > 0) ? 1 : 0;
3368         new_tags.extra_obj_type = in->variant_type;
3369         yaffs_verify_oh(in, oh, &new_tags, 1);
3370
3371         /* Create new chunk in NAND */
3372         new_chunk_id =
3373             yaffs_write_new_chunk(dev, buffer, &new_tags,
3374                                   (prev_chunk_id > 0) ? 1 : 0);
3375
3376         if (buffer)
3377                 yaffs_release_temp_buffer(dev, buffer);
3378
3379         if (new_chunk_id < 0)
3380                 return new_chunk_id;
3381
3382         in->hdr_chunk = new_chunk_id;
3383
3384         if (prev_chunk_id > 0)
3385                 yaffs_chunk_del(dev, prev_chunk_id, 1, __LINE__);
3386
3387         if (!yaffs_obj_cache_dirty(in))
3388                 in->dirty = 0;
3389
3390         /* If this was a shrink, then mark the block
3391          * that the chunk lives on */
3392         if (is_shrink) {
3393                 bi = yaffs_get_block_info(in->my_dev,
3394                                           new_chunk_id /
3395                                           in->my_dev->param.chunks_per_block);
3396                 bi->has_shrink_hdr = 1;
3397         }
3398
3399
3400         return new_chunk_id;
3401 }
3402
3403 /*--------------------- File read/write ------------------------
3404  * Read and write have very similar structures.
3405  * In general a read/write request is handled in three parts:
3406  * - an incomplete chunk at the start (if the request is not chunk-aligned),
3407  * - some number of complete chunks,
3408  * - an incomplete chunk at the end.
3409  *
3410  * Curve-ball: the first chunk might also be the last chunk.
3411  */
3412
3412
3413 int yaffs_file_rd(struct yaffs_obj *in, u8 * buffer, loff_t offset, int n_bytes)
3414 {
3415         int chunk;
3416         u32 start;
3417         int n_copy;
3418         int n = n_bytes;
3419         int n_done = 0;
3420         struct yaffs_cache *cache;
3421         struct yaffs_dev *dev;
3422
3423         dev = in->my_dev;
3424
3425       &nbs