Fix some statics and remove unused function
[yaffs2.git] / yaffs_guts.c
1 /*
2  * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
3  *
4  * Copyright (C) 2002-2011 Aleph One Ltd.
5  *   for Toby Churchill Ltd and Brightstar Engineering
6  *
7  * Created by Charles Manning <charles@aleph1.co.uk>
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License version 2 as
11  * published by the Free Software Foundation.
12  */
13
14 #include "yportenv.h"
15 #include "yaffs_trace.h"
16
17 #include "yaffs_guts.h"
18 #include "yaffs_getblockinfo.h"
19 #include "yaffs_tagscompat.h"
20 #include "yaffs_nand.h"
21 #include "yaffs_yaffs1.h"
22 #include "yaffs_yaffs2.h"
23 #include "yaffs_bitmap.h"
24 #include "yaffs_verify.h"
25 #include "yaffs_nand.h"
26 #include "yaffs_packedtags2.h"
27 #include "yaffs_nameval.h"
28 #include "yaffs_allocator.h"
29 #include "yaffs_attribs.h"
30 #include "yaffs_summary.h"
31
32 /* Note YAFFS_GC_GOOD_ENOUGH must be <= YAFFS_GC_PASSIVE_THRESHOLD */
33 #define YAFFS_GC_GOOD_ENOUGH 2
34 #define YAFFS_GC_PASSIVE_THRESHOLD 4
35
36 #include "yaffs_ecc.h"
37
38 /* Forward declarations */
39
40 static int yaffs_wr_data_obj(struct yaffs_obj *in, int inode_chunk,
41                              const u8 *buffer, int n_bytes, int use_reserve);
42
43
44
45 /* Function to calculate chunk and offset */
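/* For example, with 2048-byte data chunks on a power-of-2 geometry
 * (chunk_shift == 11, chunk_div == 1), addr 0x1234 maps to chunk 2,
 * offset 0x234.
 */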
46
47 void yaffs_addr_to_chunk(struct yaffs_dev *dev, loff_t addr,
48                                 int *chunk_out, u32 *offset_out)
49 {
50         int chunk;
51         u32 offset;
52
53         chunk = (u32) (addr >> dev->chunk_shift);
54
55         if (dev->chunk_div == 1) {
56                 /* easy power of 2 case */
57                 offset = (u32) (addr & dev->chunk_mask);
58         } else {
59                 /* Non power-of-2 case */
60
61                 loff_t chunk_base;
62
63                 chunk /= dev->chunk_div;
64
65                 chunk_base = ((loff_t) chunk) * dev->data_bytes_per_chunk;
66                 offset = (u32) (addr - chunk_base);
67         }
68
69         *chunk_out = chunk;
70         *offset_out = offset;
71 }
72
73 /* Function to return the number of shifts for a power of 2 greater than or
74  * equal to the given number
75  * Note we don't try to cater for all possible numbers and this does not have to
76  * be hellishly efficient.
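 * e.g. calc_shifts_ceiling(4096) returns 12 and calc_shifts_ceiling(4097)
 * returns 13.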
77  */
78
79 static inline u32 calc_shifts_ceiling(u32 x)
80 {
81         int extra_bits;
82         int shifts;
83
84         shifts = extra_bits = 0;
85
86         while (x > 1) {
87                 if (x & 1)
88                         extra_bits++;
89                 x >>= 1;
90                 shifts++;
91         }
92
93         if (extra_bits)
94                 shifts++;
95
96         return shifts;
97 }
98
99 /* Function to return the number of shifts to get a 1 in bit 0
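 * (i.e. the number of trailing zero bits); e.g. calc_shifts(0x40) returns 6.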
100  */
101
102 static inline u32 calc_shifts(u32 x)
103 {
104         u32 shifts;
105
106         shifts = 0;
107
108         if (!x)
109                 return 0;
110
111         while (!(x & 1)) {
112                 x >>= 1;
113                 shifts++;
114         }
115
116         return shifts;
117 }
118
119 /*
120  * Temporary buffer manipulations.
121  */
122
123 static int yaffs_init_tmp_buffers(struct yaffs_dev *dev)
124 {
125         int i;
126         u8 *buf = (u8 *) 1;
127
128         memset(dev->temp_buffer, 0, sizeof(dev->temp_buffer));
129
130         for (i = 0; buf && i < YAFFS_N_TEMP_BUFFERS; i++) {
131                 dev->temp_buffer[i].in_use = 0;
132                 buf = kmalloc(dev->param.total_bytes_per_chunk, GFP_NOFS);
133                 dev->temp_buffer[i].buffer = buf;
134         }
135
136         return buf ? YAFFS_OK : YAFFS_FAIL;
137 }
138
139 u8 *yaffs_get_temp_buffer(struct yaffs_dev * dev)
140 {
141         int i;
142
143         dev->temp_in_use++;
144         if (dev->temp_in_use > dev->max_temp)
145                 dev->max_temp = dev->temp_in_use;
146
147         for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
148                 if (dev->temp_buffer[i].in_use == 0) {
149                         dev->temp_buffer[i].in_use = 1;
150                         return dev->temp_buffer[i].buffer;
151                 }
152         }
153
154         yaffs_trace(YAFFS_TRACE_BUFFERS, "Out of temp buffers");
155         /*
156          * If we got here then we have to allocate an unmanaged one
157          * This is not good.
158          */
159
160         dev->unmanaged_buffer_allocs++;
161         return kmalloc(dev->data_bytes_per_chunk, GFP_NOFS);
162
163 }
164
165 void yaffs_release_temp_buffer(struct yaffs_dev *dev, u8 *buffer)
166 {
167         int i;
168
169         dev->temp_in_use--;
170
171         for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
172                 if (dev->temp_buffer[i].buffer == buffer) {
173                         dev->temp_buffer[i].in_use = 0;
174                         return;
175                 }
176         }
177
178         if (buffer) {
179                 /* assume it is an unmanaged one. */
180                 yaffs_trace(YAFFS_TRACE_BUFFERS,
181                         "Releasing unmanaged temp buffer");
182                 kfree(buffer);
183                 dev->unmanaged_buffer_deallocs++;
184         }
185
186 }
187
188 /*
189  * Functions for robustisizing TODO
190  *
191  */
192
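/* yaffs_handle_chunk_wr_ok() and yaffs_handle_chunk_update() are currently
 * no-op hooks; the parameter self-assignments just silence unused-parameter
 * warnings.
 */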
193 static void yaffs_handle_chunk_wr_ok(struct yaffs_dev *dev, int nand_chunk,
194                                      const u8 *data,
195                                      const struct yaffs_ext_tags *tags)
196 {
197         dev = dev;
198         nand_chunk = nand_chunk;
199         data = data;
200         tags = tags;
201 }
202
203 static void yaffs_handle_chunk_update(struct yaffs_dev *dev, int nand_chunk,
204                                       const struct yaffs_ext_tags *tags)
205 {
206         dev = dev;
207         nand_chunk = nand_chunk;
208         tags = tags;
209 }
210
211 void yaffs_handle_chunk_error(struct yaffs_dev *dev,
212                               struct yaffs_block_info *bi)
213 {
214         if (!bi->gc_prioritise) {
215                 bi->gc_prioritise = 1;
216                 dev->has_pending_prioritised_gc = 1;
217                 bi->chunk_error_strikes++;
218
219                 if (bi->chunk_error_strikes > 3) {
220                         bi->needs_retiring = 1; /* Too many strikes, so retire */
221                         yaffs_trace(YAFFS_TRACE_ALWAYS,
222                                 "yaffs: Block struck out");
223
224                 }
225         }
226 }
227
228 static void yaffs_handle_chunk_wr_error(struct yaffs_dev *dev, int nand_chunk,
229                                         int erased_ok)
230 {
231         int flash_block = nand_chunk / dev->param.chunks_per_block;
232         struct yaffs_block_info *bi = yaffs_get_block_info(dev, flash_block);
233
234         yaffs_handle_chunk_error(dev, bi);
235
236         if (erased_ok) {
237                 /* Was an actual write failure,
238                  * so mark the block for retirement.*/
239                 bi->needs_retiring = 1;
240                 yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
241                   "**>> Block %d needs retiring", flash_block);
242         }
243
244         /* Delete the chunk */
245         yaffs_chunk_del(dev, nand_chunk, 1, __LINE__);
246         yaffs_skip_rest_of_block(dev);
247 }
248
249 /*
250  * Verification code
251  */
252
253 /*
254  *  Simple hash function. Needs to have a reasonable spread
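 *  Used to spread objects across the YAFFS_NOBJECT_BUCKETS obj_id hash
 *  buckets.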
255  */
256
257 static inline int yaffs_hash_fn(int n)
258 {
259         if (n < 0)
260                 n = -n;
261         return n % YAFFS_NOBJECT_BUCKETS;
262 }
263
264 /*
265  * Access functions to useful fake objects.
266  * Note that root might have a presence in NAND if permissions are set.
267  */
268
269 struct yaffs_obj *yaffs_root(struct yaffs_dev *dev)
270 {
271         return dev->root_dir;
272 }
273
274 struct yaffs_obj *yaffs_lost_n_found(struct yaffs_dev *dev)
275 {
276         return dev->lost_n_found;
277 }
278
279 /*
280  *  Erased NAND checking functions
281  */
282
283 int yaffs_check_ff(u8 *buffer, int n_bytes)
284 {
285         /* Horrible, slow implementation */
286         while (n_bytes--) {
287                 if (*buffer != 0xff)
288                         return 0;
289                 buffer++;
290         }
291         return 1;
292 }
293
294 static int yaffs_check_chunk_erased(struct yaffs_dev *dev, int nand_chunk)
295 {
296         int retval = YAFFS_OK;
297         u8 *data = yaffs_get_temp_buffer(dev);
298         struct yaffs_ext_tags tags;
299         int result;
300
301         result = yaffs_rd_chunk_tags_nand(dev, nand_chunk, data, &tags);
302
303         if (tags.ecc_result > YAFFS_ECC_RESULT_NO_ERROR)
304                 retval = YAFFS_FAIL;
305
306         if (!yaffs_check_ff(data, dev->data_bytes_per_chunk) ||
307                 tags.chunk_used) {
308                 yaffs_trace(YAFFS_TRACE_NANDACCESS,
309                         "Chunk %d not erased", nand_chunk);
310                 retval = YAFFS_FAIL;
311         }
312
313         yaffs_release_temp_buffer(dev, data);
314
315         return retval;
316
317 }
318
319 static int yaffs_verify_chunk_written(struct yaffs_dev *dev,
320                                       int nand_chunk,
321                                       const u8 *data,
322                                       struct yaffs_ext_tags *tags)
323 {
324         int retval = YAFFS_OK;
325         struct yaffs_ext_tags temp_tags;
326         u8 *buffer = yaffs_get_temp_buffer(dev);
327         int result;
328
329         result = yaffs_rd_chunk_tags_nand(dev, nand_chunk, buffer, &temp_tags);
330         if (memcmp(buffer, data, dev->data_bytes_per_chunk) ||
331             temp_tags.obj_id != tags->obj_id ||
332             temp_tags.chunk_id != tags->chunk_id ||
333             temp_tags.n_bytes != tags->n_bytes)
334                 retval = YAFFS_FAIL;
335
336         yaffs_release_temp_buffer(dev, buffer);
337
338         return retval;
339 }
340
341
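/* Check whether n_chunks more chunks can be allocated without eating into
 * the space reserved for garbage collection and checkpointing.
 */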
342 int yaffs_check_alloc_available(struct yaffs_dev *dev, int n_chunks)
343 {
344         int reserved_chunks;
345         int reserved_blocks = dev->param.n_reserved_blocks;
346         int checkpt_blocks;
347
348         checkpt_blocks = yaffs_calc_checkpt_blocks_required(dev);
349
350         reserved_chunks =
351             (reserved_blocks + checkpt_blocks) * dev->param.chunks_per_block;
352
353         return (dev->n_free_chunks > (reserved_chunks + n_chunks));
354 }
355
356 static int yaffs_find_alloc_block(struct yaffs_dev *dev)
357 {
358         int i;
359         struct yaffs_block_info *bi;
360
361         if (dev->n_erased_blocks < 1) {
362                 /* Hoosterman we've got a problem.
363                  * Can't get space to gc
364                  */
365                 yaffs_trace(YAFFS_TRACE_ERROR,
366                   "yaffs tragedy: no more erased blocks");
367
368                 return -1;
369         }
370
371         /* Find an empty block. */
372
373         for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
374                 dev->alloc_block_finder++;
375                 if (dev->alloc_block_finder < dev->internal_start_block
376                     || dev->alloc_block_finder > dev->internal_end_block) {
377                         dev->alloc_block_finder = dev->internal_start_block;
378                 }
379
380                 bi = yaffs_get_block_info(dev, dev->alloc_block_finder);
381
382                 if (bi->block_state == YAFFS_BLOCK_STATE_EMPTY) {
383                         bi->block_state = YAFFS_BLOCK_STATE_ALLOCATING;
384                         dev->seq_number++;
385                         bi->seq_number = dev->seq_number;
386                         dev->n_erased_blocks--;
387                         yaffs_trace(YAFFS_TRACE_ALLOCATE,
388                           "Allocated block %d, seq  %d, %d left" ,
389                            dev->alloc_block_finder, dev->seq_number,
390                            dev->n_erased_blocks);
391                         return dev->alloc_block_finder;
392                 }
393         }
394
395         yaffs_trace(YAFFS_TRACE_ALWAYS,
396                 "yaffs tragedy: no more erased blocks, but there should have been %d",
397                 dev->n_erased_blocks);
398
399         return -1;
400 }
401
402 static int yaffs_alloc_chunk(struct yaffs_dev *dev, int use_reserver,
403                              struct yaffs_block_info **block_ptr)
404 {
405         int ret_val;
406         struct yaffs_block_info *bi;
407
408         if (dev->alloc_block < 0) {
409                 /* Get next block to allocate off */
410                 dev->alloc_block = yaffs_find_alloc_block(dev);
411                 dev->alloc_page = 0;
412         }
413
414         if (!use_reserver && !yaffs_check_alloc_available(dev, 1)) {
415                 /* No space unless we're allowed to use the reserve. */
416                 return -1;
417         }
418
419         if (dev->n_erased_blocks < dev->param.n_reserved_blocks
420             && dev->alloc_page == 0)
421                 yaffs_trace(YAFFS_TRACE_ALLOCATE, "Allocating reserve");
422
423         /* Next page please.... */
424         if (dev->alloc_block >= 0) {
425                 bi = yaffs_get_block_info(dev, dev->alloc_block);
426
427                 ret_val = (dev->alloc_block * dev->param.chunks_per_block) +
428                     dev->alloc_page;
429                 bi->pages_in_use++;
430                 yaffs_set_chunk_bit(dev, dev->alloc_block, dev->alloc_page);
431
432                 dev->alloc_page++;
433
434                 dev->n_free_chunks--;
435
436                 /* If the block is full set the state to full */
437                 if (dev->alloc_page >= dev->param.chunks_per_block) {
438                         bi->block_state = YAFFS_BLOCK_STATE_FULL;
439                         dev->alloc_block = -1;
440                 }
441
442                 if (block_ptr)
443                         *block_ptr = bi;
444
445                 return ret_val;
446         }
447
448         yaffs_trace(YAFFS_TRACE_ERROR,
449                 "!!!!!!!!! Allocator out !!!!!!!!!!!!!!!!!");
450
451         return -1;
452 }
453
454 static int yaffs_get_erased_chunks(struct yaffs_dev *dev)
455 {
456         int n;
457
458         n = dev->n_erased_blocks * dev->param.chunks_per_block;
459
460         if (dev->alloc_block > 0)
461                 n += (dev->param.chunks_per_block - dev->alloc_page);
462
463         return n;
464
465 }
466
467 /*
468  * yaffs_skip_rest_of_block() skips over the rest of the allocation block
469  * if we don't want to write to it.
470  */
471 void yaffs_skip_rest_of_block(struct yaffs_dev *dev)
472 {
473         struct yaffs_block_info *bi;
474
475         if (dev->alloc_block > 0) {
476                 bi = yaffs_get_block_info(dev, dev->alloc_block);
477                 if (bi->block_state == YAFFS_BLOCK_STATE_ALLOCATING) {
478                         bi->block_state = YAFFS_BLOCK_STATE_FULL;
479                         dev->alloc_block = -1;
480                 }
481         }
482 }
483
484 static int yaffs_write_new_chunk(struct yaffs_dev *dev,
485                                  const u8 *data,
486                                  struct yaffs_ext_tags *tags, int use_reserver)
487 {
488         int attempts = 0;
489         int write_ok = 0;
490         int chunk;
491
492         yaffs2_checkpt_invalidate(dev);
493
494         do {
495                 struct yaffs_block_info *bi = 0;
496                 int erased_ok = 0;
497
498                 chunk = yaffs_alloc_chunk(dev, use_reserver, &bi);
499                 if (chunk < 0) {
500                         /* no space */
501                         break;
502                 }
503
504                 /* First check this chunk is erased, if it needs
505                  * checking.  The checking policy (unless forced
506                  * always on) is as follows:
507                  *
508                  * Check the first page we try to write in a block.
509                  * If the check passes then we don't need to check any
510                  * more.        If the check fails, we check again...
511                  * If the block has been erased, we don't need to check.
512                  *
513                  * However, if the block has been prioritised for gc,
514                  * then we think there might be something odd about
515                  * this block and stop using it.
516                  *
517                  * Rationale: We should only ever see chunks that have
518                  * not been erased if there was a partially written
519                  * chunk due to power loss.  This checking policy should
520                  * catch that case with very few checks and thus save a
521                  * lot of checks that are most likely not needed.
522                  *
523                  * Mods to the above
524                  * If an erase check fails or the write fails we skip the
525                  * rest of the block.
526                  */
527
528                 /* let's give it a try */
529                 attempts++;
530
531                 if (dev->param.always_check_erased)
532                         bi->skip_erased_check = 0;
533
534                 if (!bi->skip_erased_check) {
535                         erased_ok = yaffs_check_chunk_erased(dev, chunk);
536                         if (erased_ok != YAFFS_OK) {
537                                 yaffs_trace(YAFFS_TRACE_ERROR,
538                                   "**>> yaffs chunk %d was not erased",
539                                   chunk);
540
541                                 /* If not erased, delete this one,
542                                  * skip rest of block and
543                                  * try another chunk */
544                                 yaffs_chunk_del(dev, chunk, 1, __LINE__);
545                                 yaffs_skip_rest_of_block(dev);
546                                 continue;
547                         }
548                 }
549
550                 write_ok = yaffs_wr_chunk_tags_nand(dev, chunk, data, tags);
551
552                 if (!bi->skip_erased_check)
553                         write_ok =
554                             yaffs_verify_chunk_written(dev, chunk, data, tags);
555
556                 if (write_ok != YAFFS_OK) {
557                         /* Clean up aborted write, skip to next block and
558                          * try another chunk */
559                         yaffs_handle_chunk_wr_error(dev, chunk, erased_ok);
560                         continue;
561                 }
562
563                 bi->skip_erased_check = 1;
564
565                 /* Copy the data into the robustification buffer */
566                 yaffs_handle_chunk_wr_ok(dev, chunk, data, tags);
567
568         } while (write_ok != YAFFS_OK &&
569                  (yaffs_wr_attempts <= 0 || attempts <= yaffs_wr_attempts));
570
571         if (!write_ok)
572                 chunk = -1;
573
574         if (attempts > 1) {
575                 yaffs_trace(YAFFS_TRACE_ERROR,
576                         "**>> yaffs write required %d attempts",
577                         attempts);
578                 dev->n_retried_writes += (attempts - 1);
579         }
580
581         return chunk;
582 }
583
584 /*
585  * Block retiring for handling a broken block.
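 * The block is marked bad; if that fails it is erased and an in-band
 * bad-block marker (seq_number == YAFFS_SEQUENCE_BAD_BLOCK) is written
 * instead.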
586  */
587
588 static void yaffs_retire_block(struct yaffs_dev *dev, int flash_block)
589 {
590         struct yaffs_block_info *bi = yaffs_get_block_info(dev, flash_block);
591
592         yaffs2_checkpt_invalidate(dev);
593
594         yaffs2_clear_oldest_dirty_seq(dev, bi);
595
596         if (yaffs_mark_bad(dev, flash_block) != YAFFS_OK) {
597                 if (yaffs_erase_block(dev, flash_block) != YAFFS_OK) {
598                         yaffs_trace(YAFFS_TRACE_ALWAYS,
599                                 "yaffs: Failed to mark bad and erase block %d",
600                                 flash_block);
601                 } else {
602                         struct yaffs_ext_tags tags;
603                         int chunk_id =
604                             flash_block * dev->param.chunks_per_block;
605
606                         u8 *buffer = yaffs_get_temp_buffer(dev);
607
608                         memset(buffer, 0xff, dev->data_bytes_per_chunk);
609                         memset(&tags, 0, sizeof(tags));
610                         tags.seq_number = YAFFS_SEQUENCE_BAD_BLOCK;
611                         if (dev->param.write_chunk_tags_fn(dev, chunk_id -
612                                                            dev->chunk_offset,
613                                                            buffer,
614                                                            &tags) != YAFFS_OK)
615                                 yaffs_trace(YAFFS_TRACE_ALWAYS,
616                                         "yaffs: Failed to write bad block marker to block %d",
617                                         flash_block);
618
619                         yaffs_release_temp_buffer(dev, buffer);
620                 }
621         }
622
623         bi->block_state = YAFFS_BLOCK_STATE_DEAD;
624         bi->gc_prioritise = 0;
625         bi->needs_retiring = 0;
626
627         dev->n_retired_blocks++;
628 }
629
630 /*---------------- Name handling functions ------------*/
631
632 static u16 yaffs_calc_name_sum(const YCHAR *name)
633 {
634         u16 sum = 0;
635         u16 i = 1;
636
637         if (!name)
638                 return 0;
639
640         while ((*name) && i < (YAFFS_MAX_NAME_LENGTH / 2)) {
641
642                 /* 0x1f mask is case insensitive */
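                /* e.g. 'A' (0x41) and 'a' (0x61) both contribute 0x01 */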
643                 sum += ((*name) & 0x1f) * i;
644                 i++;
645                 name++;
646         }
647         return sum;
648 }
649
650 void yaffs_set_obj_name(struct yaffs_obj *obj, const YCHAR * name)
651 {
652         memset(obj->short_name, 0, sizeof(obj->short_name));
653         if (name &&
654                 strnlen(name, YAFFS_SHORT_NAME_LENGTH + 1) <=
655                 YAFFS_SHORT_NAME_LENGTH)
656                 strcpy(obj->short_name, name);
657         else
658                 obj->short_name[0] = _Y('\0');
659         obj->sum = yaffs_calc_name_sum(name);
660 }
661
662 void yaffs_set_obj_name_from_oh(struct yaffs_obj *obj,
663                                 const struct yaffs_obj_hdr *oh)
664 {
665 #ifdef CONFIG_YAFFS_AUTO_UNICODE
666         YCHAR tmp_name[YAFFS_MAX_NAME_LENGTH + 1];
667         memset(tmp_name, 0, sizeof(tmp_name));
668         yaffs_load_name_from_oh(obj->my_dev, tmp_name, oh->name,
669                                 YAFFS_MAX_NAME_LENGTH + 1);
670         yaffs_set_obj_name(obj, tmp_name);
671 #else
672         yaffs_set_obj_name(obj, oh->name);
673 #endif
674 }
675
676 loff_t yaffs_max_file_size(struct yaffs_dev *dev)
677 {
678         return ((loff_t) YAFFS_MAX_CHUNK_ID) * dev->data_bytes_per_chunk;
679 }
680
681 /*-------------------- TNODES -------------------
682
683  * List of spare tnodes
684  * The list is hooked together using the first pointer
685  * in the tnode.
686  */
687
688 struct yaffs_tnode *yaffs_get_tnode(struct yaffs_dev *dev)
689 {
690         struct yaffs_tnode *tn = yaffs_alloc_raw_tnode(dev);
691
692         if (tn) {
693                 memset(tn, 0, dev->tnode_size);
694                 dev->n_tnodes++;
695         }
696
697         dev->checkpoint_blocks_required = 0;    /* force recalculation */
698
699         return tn;
700 }
701
702 /* FreeTnode frees up a tnode and puts it back on the free list */
703 static void yaffs_free_tnode(struct yaffs_dev *dev, struct yaffs_tnode *tn)
704 {
705         yaffs_free_raw_tnode(dev, tn);
706         dev->n_tnodes--;
707         dev->checkpoint_blocks_required = 0;    /* force recalculation */
708 }
709
710 static void yaffs_deinit_tnodes_and_objs(struct yaffs_dev *dev)
711 {
712         yaffs_deinit_raw_tnodes_and_objs(dev);
713         dev->n_obj = 0;
714         dev->n_tnodes = 0;
715 }
716
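/* Set the group base for entry pos in a level 0 tnode.
 * Entries are packed tnode_width bits wide into the u32 map and may
 * straddle a 32-bit word boundary, hence the second masking step below.
 */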
717 static void yaffs_load_tnode_0(struct yaffs_dev *dev, struct yaffs_tnode *tn,
718                         unsigned pos, unsigned val)
719 {
720         u32 *map = (u32 *) tn;
721         u32 bit_in_map;
722         u32 bit_in_word;
723         u32 word_in_map;
724         u32 mask;
725
726         pos &= YAFFS_TNODES_LEVEL0_MASK;
727         val >>= dev->chunk_grp_bits;
728
729         bit_in_map = pos * dev->tnode_width;
730         word_in_map = bit_in_map / 32;
731         bit_in_word = bit_in_map & (32 - 1);
732
733         mask = dev->tnode_mask << bit_in_word;
734
735         map[word_in_map] &= ~mask;
736         map[word_in_map] |= (mask & (val << bit_in_word));
737
738         if (dev->tnode_width > (32 - bit_in_word)) {
739                 bit_in_word = (32 - bit_in_word);
740                 word_in_map++;
741                 mask =
742                     dev->tnode_mask >> bit_in_word;
743                 map[word_in_map] &= ~mask;
744                 map[word_in_map] |= (mask & (val >> bit_in_word));
745         }
746 }
747
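/* Read back the group base for entry pos in a level 0 tnode, reversing the
 * packing done by yaffs_load_tnode_0() (including the chunk_grp_bits shift).
 */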
748 u32 yaffs_get_group_base(struct yaffs_dev *dev, struct yaffs_tnode *tn,
749                          unsigned pos)
750 {
751         u32 *map = (u32 *) tn;
752         u32 bit_in_map;
753         u32 bit_in_word;
754         u32 word_in_map;
755         u32 val;
756
757         pos &= YAFFS_TNODES_LEVEL0_MASK;
758
759         bit_in_map = pos * dev->tnode_width;
760         word_in_map = bit_in_map / 32;
761         bit_in_word = bit_in_map & (32 - 1);
762
763         val = map[word_in_map] >> bit_in_word;
764
765         if (dev->tnode_width > (32 - bit_in_word)) {
766                 bit_in_word = (32 - bit_in_word);
767                 word_in_map++;
768                 val |= (map[word_in_map] << bit_in_word);
769         }
770
771         val &= dev->tnode_mask;
772         val <<= dev->chunk_grp_bits;
773
774         return val;
775 }
776
777 /* ------------------- End of individual tnode manipulation -----------------*/
778
779 /* ---------Functions to manipulate the look-up tree (made up of tnodes) ------
780  * The look up tree is represented by the top tnode and the number of top_level
781  * in the tree. 0 means only the level 0 tnode is in the tree.
782  */
783
784 /* FindLevel0Tnode finds the level 0 tnode, if one exists. */
785 struct yaffs_tnode *yaffs_find_tnode_0(struct yaffs_dev *dev,
786                                        struct yaffs_file_var *file_struct,
787                                        u32 chunk_id)
788 {
789         struct yaffs_tnode *tn = file_struct->top;
790         u32 i;
791         int required_depth;
792         int level = file_struct->top_level;
793
794         dev = dev;
795
796         /* Check sane level and chunk Id */
797         if (level < 0 || level > YAFFS_TNODES_MAX_LEVEL)
798                 return NULL;
799
800         if (chunk_id > YAFFS_MAX_CHUNK_ID)
801                 return NULL;
802
803         /* First check we're tall enough (ie enough top_level) */
804
805         i = chunk_id >> YAFFS_TNODES_LEVEL0_BITS;
806         required_depth = 0;
807         while (i) {
808                 i >>= YAFFS_TNODES_INTERNAL_BITS;
809                 required_depth++;
810         }
811
812         if (required_depth > file_struct->top_level)
813                 return NULL;    /* Not tall enough, so we can't find it */
814
815         /* Traverse down to level 0 */
816         while (level > 0 && tn) {
817                 tn = tn->internal[(chunk_id >>
818                                    (YAFFS_TNODES_LEVEL0_BITS +
819                                     (level - 1) *
820                                     YAFFS_TNODES_INTERNAL_BITS)) &
821                                   YAFFS_TNODES_INTERNAL_MASK];
822                 level--;
823         }
824
825         return tn;
826 }
827
828 /* add_find_tnode_0 finds the level 0 tnode if it exists,
829  * otherwise first expands the tree.
830  * This happens in two steps:
831  *  1. If the tree isn't tall enough, then make it taller.
832  *  2. Scan down the tree towards the level 0 tnode adding tnodes if required.
833  *
834  * Used when modifying the tree.
835  *
836  *  If the tn argument is NULL, then a fresh tnode will be added otherwise the
837  *  specified tn will be plugged into the tree.
838  */
839
840 struct yaffs_tnode *yaffs_add_find_tnode_0(struct yaffs_dev *dev,
841                                            struct yaffs_file_var *file_struct,
842                                            u32 chunk_id,
843                                            struct yaffs_tnode *passed_tn)
844 {
845         int required_depth;
846         int i;
847         int l;
848         struct yaffs_tnode *tn;
849         u32 x;
850
851         /* Check sane level and page Id */
852         if (file_struct->top_level < 0 ||
853             file_struct->top_level > YAFFS_TNODES_MAX_LEVEL)
854                 return NULL;
855
856         if (chunk_id > YAFFS_MAX_CHUNK_ID)
857                 return NULL;
858
859         /* First check we're tall enough (ie enough top_level) */
860
861         x = chunk_id >> YAFFS_TNODES_LEVEL0_BITS;
862         required_depth = 0;
863         while (x) {
864                 x >>= YAFFS_TNODES_INTERNAL_BITS;
865                 required_depth++;
866         }
867
868         if (required_depth > file_struct->top_level) {
869                 /* Not tall enough, gotta make the tree taller */
870                 for (i = file_struct->top_level; i < required_depth; i++) {
871
872                         tn = yaffs_get_tnode(dev);
873
874                         if (tn) {
875                                 tn->internal[0] = file_struct->top;
876                                 file_struct->top = tn;
877                                 file_struct->top_level++;
878                         } else {
879                                 yaffs_trace(YAFFS_TRACE_ERROR,
880                                         "yaffs: no more tnodes");
881                                 return NULL;
882                         }
883                 }
884         }
885
886         /* Traverse down to level 0, adding anything we need */
887
888         l = file_struct->top_level;
889         tn = file_struct->top;
890
891         if (l > 0) {
892                 while (l > 0 && tn) {
893                         x = (chunk_id >>
894                              (YAFFS_TNODES_LEVEL0_BITS +
895                               (l - 1) * YAFFS_TNODES_INTERNAL_BITS)) &
896                             YAFFS_TNODES_INTERNAL_MASK;
897
898                         if ((l > 1) && !tn->internal[x]) {
899                                 /* Add missing non-level-zero tnode */
900                                 tn->internal[x] = yaffs_get_tnode(dev);
901                                 if (!tn->internal[x])
902                                         return NULL;
903                         } else if (l == 1) {
904                                 /* Looking from level 1 at level 0 */
905                                 if (passed_tn) {
906                                         /* If we already have one, release it */
907                                         if (tn->internal[x])
908                                                 yaffs_free_tnode(dev,
909                                                         tn->internal[x]);
910                                         tn->internal[x] = passed_tn;
911
912                                 } else if (!tn->internal[x]) {
913                                         /* Don't have one, none passed in */
914                                         tn->internal[x] = yaffs_get_tnode(dev);
915                                         if (!tn->internal[x])
916                                                 return NULL;
917                                 }
918                         }
919
920                         tn = tn->internal[x];
921                         l--;
922                 }
923         } else {
924                 /* top is level 0 */
925                 if (passed_tn) {
926                         memcpy(tn, passed_tn,
927                                (dev->tnode_width * YAFFS_NTNODES_LEVEL0) / 8);
928                         yaffs_free_tnode(dev, passed_tn);
929                 }
930         }
931
932         return tn;
933 }
934
935 static int yaffs_tags_match(const struct yaffs_ext_tags *tags, int obj_id,
936                             int chunk_obj)
937 {
938         return (tags->chunk_id == chunk_obj &&
939                 tags->obj_id == obj_id &&
940                 !tags->is_deleted) ? 1 : 0;
941
942 }
943
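/* Scan forward through a chunk group looking for a chunk that is in use and
 * whose tags match obj_id/inode_chunk. With a group size of 1 no tag read is
 * needed. Returns the NAND chunk number, or -1 if no match is found.
 */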
944 static int yaffs_find_chunk_in_group(struct yaffs_dev *dev, int the_chunk,
945                                         struct yaffs_ext_tags *tags, int obj_id,
946                                         int inode_chunk)
947 {
948         int j;
949
950         for (j = 0; the_chunk && j < dev->chunk_grp_size; j++) {
951                 if (yaffs_check_chunk_bit
952                     (dev, the_chunk / dev->param.chunks_per_block,
953                      the_chunk % dev->param.chunks_per_block)) {
954
955                         if (dev->chunk_grp_size == 1)
956                                 return the_chunk;
957                         else {
958                                 yaffs_rd_chunk_tags_nand(dev, the_chunk, NULL,
959                                                          tags);
960                                 if (yaffs_tags_match(tags,
961                                                         obj_id, inode_chunk)) {
962                                         /* found it; */
963                                         return the_chunk;
964                                 }
965                         }
966                 }
967                 the_chunk++;
968         }
969         return -1;
970 }
971
972 static int yaffs_find_chunk_in_file(struct yaffs_obj *in, int inode_chunk,
973                                     struct yaffs_ext_tags *tags)
974 {
975         /* Get the Tnode, then get the level 0 chunk offset */
976         struct yaffs_tnode *tn;
977         int the_chunk = -1;
978         struct yaffs_ext_tags local_tags;
979         int ret_val = -1;
980         struct yaffs_dev *dev = in->my_dev;
981
982         if (!tags) {
983                 /* Passed a NULL, so use our own tags space */
984                 tags = &local_tags;
985         }
986
987         tn = yaffs_find_tnode_0(dev, &in->variant.file_variant, inode_chunk);
988
989         if (!tn)
990                 return ret_val;
991
992         the_chunk = yaffs_get_group_base(dev, tn, inode_chunk);
993
994         ret_val = yaffs_find_chunk_in_group(dev, the_chunk, tags, in->obj_id,
995                                               inode_chunk);
996         return ret_val;
997 }
998
999 static int yaffs_find_del_file_chunk(struct yaffs_obj *in, int inode_chunk,
1000                                      struct yaffs_ext_tags *tags)
1001 {
1002         /* Get the Tnode, then get the level 0 chunk offset */
1003         struct yaffs_tnode *tn;
1004         int the_chunk = -1;
1005         struct yaffs_ext_tags local_tags;
1006         struct yaffs_dev *dev = in->my_dev;
1007         int ret_val = -1;
1008
1009         if (!tags) {
1010                 /* Passed a NULL, so use our own tags space */
1011                 tags = &local_tags;
1012         }
1013
1014         tn = yaffs_find_tnode_0(dev, &in->variant.file_variant, inode_chunk);
1015
1016         if (!tn)
1017                 return ret_val;
1018
1019         the_chunk = yaffs_get_group_base(dev, tn, inode_chunk);
1020
1021         ret_val = yaffs_find_chunk_in_group(dev, the_chunk, tags, in->obj_id,
1022                                               inode_chunk);
1023
1024         /* Delete the entry in the filestructure (if found) */
1025         if (ret_val != -1)
1026                 yaffs_load_tnode_0(dev, tn, inode_chunk, 0);
1027
1028         return ret_val;
1029 }
1030
1031 int yaffs_put_chunk_in_file(struct yaffs_obj *in, int inode_chunk,
1032                             int nand_chunk, int in_scan)
1033 {
1034         /* NB in_scan is zero unless scanning.
1035          * For forward scanning, in_scan is > 0;
1036          * for backward scanning in_scan is < 0
1037          *
1038          * nand_chunk = 0 is a dummy insert to make sure the tnodes are there.
1039          */
1040
1041         struct yaffs_tnode *tn;
1042         struct yaffs_dev *dev = in->my_dev;
1043         int existing_chunk;
1044         struct yaffs_ext_tags existing_tags;
1045         struct yaffs_ext_tags new_tags;
1046         unsigned existing_serial, new_serial;
1047
1048         if (in->variant_type != YAFFS_OBJECT_TYPE_FILE) {
1049                 /* Just ignore an attempt at putting a chunk into a non-file
1050                  * during scanning.
1051                  * If it is not during Scanning then something went wrong!
1052                  */
1053                 if (!in_scan) {
1054                         yaffs_trace(YAFFS_TRACE_ERROR,
1055                                 "yaffs tragedy: attempt to put data chunk into a non-file"
1056                                 );
1057                         BUG();
1058                 }
1059
1060                 yaffs_chunk_del(dev, nand_chunk, 1, __LINE__);
1061                 return YAFFS_OK;
1062         }
1063
1064         tn = yaffs_add_find_tnode_0(dev,
1065                                     &in->variant.file_variant,
1066                                     inode_chunk, NULL);
1067         if (!tn)
1068                 return YAFFS_FAIL;
1069
1070         if (!nand_chunk)
1071                 /* Dummy insert, bail now */
1072                 return YAFFS_OK;
1073
1074         existing_chunk = yaffs_get_group_base(dev, tn, inode_chunk);
1075
1076         if (in_scan != 0) {
1077                 /* If we're scanning then we need to test for duplicates
1078                  * NB This does not need to be efficient since it should only
1079                  * happen when the power fails during a write, then only one
1080                  * chunk should ever be affected.
1081                  *
1082                  * Correction for YAFFS2: This could happen quite a lot and we
1083                  * need to think about efficiency! TODO
1084                  * Update: For backward scanning we don't need to re-read tags
1085                  * so this is quite cheap.
1086                  */
1087
1088                 if (existing_chunk > 0) {
1089                         /* NB Right now existing chunk will not be real
1090                          * chunk_id if the chunk group size > 1
1091                          * thus we have to do a FindChunkInFile to get the
1092                          * real chunk id.
1093                          *
1094                          * We have a duplicate now we need to decide which
1095                          * one to use:
1096                          *
1097                          * Backwards scanning YAFFS2: The old one is what
1098                          * we use, dump the new one.
1099                          * YAFFS1: Get both sets of tags and compare serial
1100                          * numbers.
1101                          */
1102
1103                         if (in_scan > 0) {
1104                                 /* Only do this for forward scanning */
1105                                 yaffs_rd_chunk_tags_nand(dev,
1106                                                          nand_chunk,
1107                                                          NULL, &new_tags);
1108
1109                                 /* Do a proper find */
1110                                 existing_chunk =
1111                                     yaffs_find_chunk_in_file(in, inode_chunk,
1112                                                              &existing_tags);
1113                         }
1114
1115                         if (existing_chunk <= 0) {
1116                                 /* Hoosterman - how did this happen? */
1117
1118                                 yaffs_trace(YAFFS_TRACE_ERROR,
1119                                         "yaffs tragedy: existing chunk < 0 in scan"
1120                                         );
1121
1122                         }
1123
1124                         /* NB The deleted flags should be false, otherwise
1125                          * the chunks will not be loaded during a scan
1126                          */
1127
1128                         if (in_scan > 0) {
1129                                 new_serial = new_tags.serial_number;
1130                                 existing_serial = existing_tags.serial_number;
1131                         }
1132
1133                         if ((in_scan > 0) &&
1134                             (existing_chunk <= 0 ||
1135                              ((existing_serial + 1) & 3) == new_serial)) {
1136                                 /* Forward scanning.
1137                                  * Use new
1138                                  * Delete the old one and drop through to
1139                                  * update the tnode
1140                                  */
1141                                 yaffs_chunk_del(dev, existing_chunk, 1,
1142                                                 __LINE__);
1143                         } else {
1144                                 /* Backward scanning or we want to use the
1145                                  * existing one
1146                                  * Delete the new one and return early so that
1147                                  * the tnode isn't changed
1148                                  */
1149                                 yaffs_chunk_del(dev, nand_chunk, 1, __LINE__);
1150                                 return YAFFS_OK;
1151                         }
1152                 }
1153
1154         }
1155
1156         if (existing_chunk == 0)
1157                 in->n_data_chunks++;
1158
1159         yaffs_load_tnode_0(dev, tn, inode_chunk, nand_chunk);
1160
1161         return YAFFS_OK;
1162 }
1163
1164 static void yaffs_soft_del_chunk(struct yaffs_dev *dev, int chunk)
1165 {
1166         struct yaffs_block_info *the_block;
1167         unsigned block_no;
1168
1169         yaffs_trace(YAFFS_TRACE_DELETION, "soft delete chunk %d", chunk);
1170
1171         block_no = chunk / dev->param.chunks_per_block;
1172         the_block = yaffs_get_block_info(dev, block_no);
1173         if (the_block) {
1174                 the_block->soft_del_pages++;
1175                 dev->n_free_chunks++;
1176                 yaffs2_update_oldest_dirty_seq(dev, block_no, the_block);
1177         }
1178 }
1179
1180 /* SoftDeleteWorker scans backwards through the tnode tree and soft deletes all
1181  * the chunks in the file.
1182  * All soft deleting does is increment the block's softdelete count and pulls
1183  * the chunk out of the tnode.
1184  * Thus, essentially this is the same as DeleteWorker except that the chunks
1185  * are soft deleted.
1186  */
1187
1188 static int yaffs_soft_del_worker(struct yaffs_obj *in, struct yaffs_tnode *tn,
1189                                  u32 level, int chunk_offset)
1190 {
1191         int i;
1192         int the_chunk;
1193         int all_done = 1;
1194         struct yaffs_dev *dev = in->my_dev;
1195
1196         if (!tn)
1197                 return 1;
1198
1199         if (level > 0) {
1200                 for (i = YAFFS_NTNODES_INTERNAL - 1;
1201                         all_done && i >= 0;
1202                         i--) {
1203                         if (tn->internal[i]) {
1204                                 all_done =
1205                                     yaffs_soft_del_worker(in,
1206                                         tn->internal[i],
1207                                         level - 1,
1208                                         (chunk_offset <<
1209                                         YAFFS_TNODES_INTERNAL_BITS)
1210                                         + i);
1211                                 if (all_done) {
1212                                         yaffs_free_tnode(dev,
1213                                                 tn->internal[i]);
1214                                         tn->internal[i] = NULL;
1215                                 } else {
1216                                         /* Can this happen? */
1217                                 }
1218                         }
1219                 }
1220                 return (all_done) ? 1 : 0;
1221         }
1222
1223         /* level 0 */
1224          for (i = YAFFS_NTNODES_LEVEL0 - 1; i >= 0; i--) {
1225                 the_chunk = yaffs_get_group_base(dev, tn, i);
1226                 if (the_chunk) {
1227                         yaffs_soft_del_chunk(dev, the_chunk);
1228                         yaffs_load_tnode_0(dev, tn, i, 0);
1229                 }
1230         }
1231         return 1;
1232 }
1233
1234 static void yaffs_remove_obj_from_dir(struct yaffs_obj *obj)
1235 {
1236         struct yaffs_dev *dev = obj->my_dev;
1237         struct yaffs_obj *parent;
1238
1239         yaffs_verify_obj_in_dir(obj);
1240         parent = obj->parent;
1241
1242         yaffs_verify_dir(parent);
1243
1244         if (dev && dev->param.remove_obj_fn)
1245                 dev->param.remove_obj_fn(obj);
1246
1247         list_del_init(&obj->siblings);
1248         obj->parent = NULL;
1249
1250         yaffs_verify_dir(parent);
1251 }
1252
1253 void yaffs_add_obj_to_dir(struct yaffs_obj *directory, struct yaffs_obj *obj)
1254 {
1255         if (!directory) {
1256                 yaffs_trace(YAFFS_TRACE_ALWAYS,
1257                         "tragedy: Trying to add an object to a null pointer directory"
1258                         );
1259                 BUG();
1260                 return;
1261         }
1262         if (directory->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
1263                 yaffs_trace(YAFFS_TRACE_ALWAYS,
1264                         "tragedy: Trying to add an object to a non-directory"
1265                         );
1266                 BUG();
1267         }
1268
1269         if (obj->siblings.prev == NULL) {
1270                 /* Not initialised */
1271                 BUG();
1272         }
1273
1274         yaffs_verify_dir(directory);
1275
1276         yaffs_remove_obj_from_dir(obj);
1277
1278         /* Now add it */
1279         list_add(&obj->siblings, &directory->variant.dir_variant.children);
1280         obj->parent = directory;
1281
1282         if (directory == obj->my_dev->unlinked_dir
1283             || directory == obj->my_dev->del_dir) {
1284                 obj->unlinked = 1;
1285                 obj->my_dev->n_unlinked_files++;
1286                 obj->rename_allowed = 0;
1287         }
1288
1289         yaffs_verify_dir(directory);
1290         yaffs_verify_obj_in_dir(obj);
1291 }
1292
1293 static int yaffs_change_obj_name(struct yaffs_obj *obj,
1294                                  struct yaffs_obj *new_dir,
1295                                  const YCHAR *new_name, int force, int shadows)
1296 {
1297         int unlink_op;
1298         int del_op;
1299         struct yaffs_obj *existing_target;
1300
1301         if (new_dir == NULL)
1302                 new_dir = obj->parent;  /* use the old directory */
1303
1304         if (new_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
1305                 yaffs_trace(YAFFS_TRACE_ALWAYS,
1306                         "tragedy: yaffs_change_obj_name: new_dir is not a directory"
1307                         );
1308                 BUG();
1309         }
1310
1311         unlink_op = (new_dir == obj->my_dev->unlinked_dir);
1312         del_op = (new_dir == obj->my_dev->del_dir);
1313
1314         existing_target = yaffs_find_by_name(new_dir, new_name);
1315
1316         /* If the object is a file going into the unlinked directory,
1317          *   then it is OK to just stuff it in since duplicate names are OK.
1318          *   else only proceed if the new name does not exist and we're putting
1319          *   it into a directory.
1320          */
1321         if (!(unlink_op || del_op || force ||
1322               shadows > 0 || !existing_target) ||
1323               new_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY)
1324                 return YAFFS_FAIL;
1325
1326         yaffs_set_obj_name(obj, new_name);
1327         obj->dirty = 1;
1328         yaffs_add_obj_to_dir(new_dir, obj);
1329
1330         if (unlink_op)
1331                 obj->unlinked = 1;
1332
1333         /* If it is a deletion then we mark it as a shrink for gc  */
1334         if (yaffs_update_oh(obj, new_name, 0, del_op, shadows, NULL) >= 0)
1335                 return YAFFS_OK;
1336
1337         return YAFFS_FAIL;
1338 }
1339
1340 /*------------------------ Short Operations Cache ------------------------------
1341  *   In many situations where there is no high-level buffering, a lot of
1342  *   reads might be short sequential reads, and a lot of writes may be short
1343  *   sequential writes, e.g. scanning/writing a jpeg file.
1344  *   In these cases, a short read/write cache can provide a huge performance
1345  *   benefit with dumb-as-a-rock code.
1346  *   In Linux, the page cache provides read buffering and the short op cache
1347  *   provides write buffering.
1348  *
1349  *   There are a small number (~10) of cache chunks per device so that we don't
1350  *   need a very intelligent search.
1351  */
1352
1353 static int yaffs_obj_cache_dirty(struct yaffs_obj *obj)
1354 {
1355         struct yaffs_dev *dev = obj->my_dev;
1356         int i;
1357         struct yaffs_cache *cache;
1358         int n_caches = obj->my_dev->param.n_caches;
1359
1360         for (i = 0; i < n_caches; i++) {
1361                 cache = &dev->cache[i];
1362                 if (cache->object == obj && cache->dirty)
1363                         return 1;
1364         }
1365
1366         return 0;
1367 }
1368
1369 static void yaffs_flush_file_cache(struct yaffs_obj *obj)
1370 {
1371         struct yaffs_dev *dev = obj->my_dev;
1372         int lowest = -99;       /* Stop compiler whining. */
1373         int i;
1374         struct yaffs_cache *cache;
1375         int chunk_written = 0;
1376         int n_caches = obj->my_dev->param.n_caches;
1377
1378         if (n_caches < 1)
1379                 return;
1380         do {
1381                 cache = NULL;
1382
1383                 /* Find the lowest dirty chunk for this object */
1384                 for (i = 0; i < n_caches; i++) {
1385                         if (dev->cache[i].object == obj &&
1386                             dev->cache[i].dirty) {
1387                                 if (!cache ||
1388                                     dev->cache[i].chunk_id < lowest) {
1389                                         cache = &dev->cache[i];
1390                                         lowest = cache->chunk_id;
1391                                 }
1392                         }
1393                 }
1394
1395                 if (cache && !cache->locked) {
1396                         /* Write it out and free it up */
1397                         chunk_written =
1398                             yaffs_wr_data_obj(cache->object,
1399                                               cache->chunk_id,
1400                                               cache->data,
1401                                               cache->n_bytes, 1);
1402                         cache->dirty = 0;
1403                         cache->object = NULL;
1404                 }
1405         } while (cache && chunk_written > 0);
1406
1407         if (cache)
1408                 /* Hoosterman, disk full while writing cache out. */
1409                 yaffs_trace(YAFFS_TRACE_ERROR,
1410                         "yaffs tragedy: no space during cache write");
1411 }
1412
1413 /* yaffs_flush_whole_cache(dev)
1414  *
1415  * Flush the file cache of every object that has dirty cached chunks.
1416  */
1417
1418 void yaffs_flush_whole_cache(struct yaffs_dev *dev)
1419 {
1420         struct yaffs_obj *obj;
1421         int n_caches = dev->param.n_caches;
1422         int i;
1423
1424         /* Find a dirty object in the cache and flush it...
1425          * until there are no further dirty objects.
1426          */
1427         do {
1428                 obj = NULL;
1429                 for (i = 0; i < n_caches && !obj; i++) {
1430                         if (dev->cache[i].object && dev->cache[i].dirty)
1431                                 obj = dev->cache[i].object;
1432                 }
1433                 if (obj)
1434                         yaffs_flush_file_cache(obj);
1435         } while (obj);
1436
1437 }
1438
1439 /* Grab us a cache chunk for use.
1440  * First look for an empty one.
1441  * Then look for the least recently used non-dirty one.
1442  * Then look for the least recently used dirty one...., flush and look again.
1443  */
1444 static struct yaffs_cache *yaffs_grab_chunk_worker(struct yaffs_dev *dev)
1445 {
1446         int i;
1447
1448         if (dev->param.n_caches > 0) {
1449                 for (i = 0; i < dev->param.n_caches; i++) {
1450                         if (!dev->cache[i].object)
1451                                 return &dev->cache[i];
1452                 }
1453         }
1454         return NULL;
1455 }
1456
1457 static struct yaffs_cache *yaffs_grab_chunk_cache(struct yaffs_dev *dev)
1458 {
1459         struct yaffs_cache *cache;
1460         struct yaffs_obj *the_obj;
1461         int usage;
1462         int i;
1463         int pushout;
1464
1465         if (dev->param.n_caches < 1)
1466                 return NULL;
1467
1468         /* Try to find a non-dirty one... */
1469
1470         cache = yaffs_grab_chunk_worker(dev);
1471
1472         if (!cache) {
1473                 /* They were all dirty, find the LRU object and flush
1474                  * its cache, then  find again.
1475                  * NB what's here is not very accurate,
1476                  * we actually flush the object with the LRU chunk.
1477                  */
1478
1479                 /* With locking we can't assume we can use entry zero,
1480                  * Set the_obj to a valid pointer for Coverity. */
1481                 the_obj = dev->cache[0].object;
1482                 usage = -1;
1483                 cache = NULL;
1484                 pushout = -1;
1485
1486                 for (i = 0; i < dev->param.n_caches; i++) {
1487                         if (dev->cache[i].object &&
1488                             !dev->cache[i].locked &&
1489                             (dev->cache[i].last_use < usage ||
1490                             !cache)) {
1491                                 usage = dev->cache[i].last_use;
1492                                 the_obj = dev->cache[i].object;
1493                                 cache = &dev->cache[i];
1494                                 pushout = i;
1495                         }
1496                 }
1497
1498                 if (!cache || cache->dirty) {
1499                         /* Flush and try again */
1500                         yaffs_flush_file_cache(the_obj);
1501                         cache = yaffs_grab_chunk_worker(dev);
1502                 }
1503         }
1504         return cache;
1505 }
1506
1507 /* Find a cached chunk */
1508 static struct yaffs_cache *yaffs_find_chunk_cache(const struct yaffs_obj *obj,
1509                                                   int chunk_id)
1510 {
1511         struct yaffs_dev *dev = obj->my_dev;
1512         int i;
1513
1514         if (dev->param.n_caches < 1)
1515                 return NULL;
1516
1517         for (i = 0; i < dev->param.n_caches; i++) {
1518                 if (dev->cache[i].object == obj &&
1519                     dev->cache[i].chunk_id == chunk_id) {
1520                         dev->cache_hits++;
1521
1522                         return &dev->cache[i];
1523                 }
1524         }
1525         return NULL;
1526 }
1527
1528 /* Mark the chunk for the least recently used algorithm */
1529 static void yaffs_use_cache(struct yaffs_dev *dev, struct yaffs_cache *cache,
1530                             int is_write)
1531 {
1532         int i;
1533
1534         if (dev->param.n_caches < 1)
1535                 return;
1536
1537         if (dev->cache_last_use < 0 ||
1538                 dev->cache_last_use > 100000000) {
1539                 /* Reset the cache usages */
1540                 for (i = 1; i < dev->param.n_caches; i++)
1541                         dev->cache[i].last_use = 0;
1542
1543                 dev->cache_last_use = 0;
1544         }
1545         dev->cache_last_use++;
1546         cache->last_use = dev->cache_last_use;
1547
1548         if (is_write)
1549                 cache->dirty = 1;
1550 }
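
/*
 * A minimal sketch of how the three helpers above are meant to combine on a
 * read path, assuming param.n_caches > 0 (so the grab cannot return NULL)
 * and ignoring the locking and partial-chunk handling that the real file
 * read/write code further down has to do:
 *
 *	struct yaffs_dev *dev = in->my_dev;
 *	struct yaffs_cache *cache;
 *
 *	cache = yaffs_find_chunk_cache(in, inode_chunk);
 *	if (!cache) {
 *		cache = yaffs_grab_chunk_cache(dev);
 *		cache->object = in;
 *		cache->chunk_id = inode_chunk;
 *		cache->dirty = 0;
 *		yaffs_rd_data_obj(in, inode_chunk, cache->data);
 *	}
 *	yaffs_use_cache(dev, cache, 0);
 *
 * The final argument is is_write: 0 for a read, 1 also marks the cache dirty.
 */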
1551
1552 /* Invalidate a single cache page.
1553  * Do this when a whole page gets written,
1554  * ie the short cache for this page is no longer valid.
1555  */
1556 static void yaffs_invalidate_chunk_cache(struct yaffs_obj *object, int chunk_id)
1557 {
1558         struct yaffs_cache *cache;
1559
1560         if (object->my_dev->param.n_caches > 0) {
1561                 cache = yaffs_find_chunk_cache(object, chunk_id);
1562
1563                 if (cache)
1564                         cache->object = NULL;
1565         }
1566 }
1567
1568 /* Invalidate all the cache pages associated with this object
1569  * Do this whenever the file is deleted or resized.
1570  */
1571 static void yaffs_invalidate_whole_cache(struct yaffs_obj *in)
1572 {
1573         int i;
1574         struct yaffs_dev *dev = in->my_dev;
1575
1576         if (dev->param.n_caches > 0) {
1577                 /* Invalidate it. */
1578                 for (i = 0; i < dev->param.n_caches; i++) {
1579                         if (dev->cache[i].object == in)
1580                                 dev->cache[i].object = NULL;
1581                 }
1582         }
1583 }
1584
1585 static void yaffs_unhash_obj(struct yaffs_obj *obj)
1586 {
1587         int bucket;
1588         struct yaffs_dev *dev = obj->my_dev;
1589
1590         /* If it is still linked into the bucket list, remove it from the list */
1591         if (!list_empty(&obj->hash_link)) {
1592                 list_del_init(&obj->hash_link);
1593                 bucket = yaffs_hash_fn(obj->obj_id);
1594                 dev->obj_bucket[bucket].count--;
1595         }
1596 }
1597
1598 /* FreeObject frees up an Object and puts it back on the free list */
1599 static void yaffs_free_obj(struct yaffs_obj *obj)
1600 {
1601         struct yaffs_dev *dev;
1602
1603         if (!obj) {
1604                 BUG();
1605                 return;
1606         }
1607         dev = obj->my_dev;
1608         yaffs_trace(YAFFS_TRACE_OS, "FreeObject %p inode %p",
1609                 obj, obj->my_inode);
1610         if (obj->parent)
1611                 BUG();
1612         if (!list_empty(&obj->siblings))
1613                 BUG();
1614
1615         if (obj->my_inode) {
1616                 /* We're still hooked up to a cached inode.
1617                  * Don't delete now, but mark for later deletion
1618                  */
1619                 obj->defered_free = 1;
1620                 return;
1621         }
1622
1623         yaffs_unhash_obj(obj);
1624
1625         yaffs_free_raw_obj(dev, obj);
1626         dev->n_obj--;
1627         dev->checkpoint_blocks_required = 0;    /* force recalculation */
1628 }
1629
1630 void yaffs_handle_defered_free(struct yaffs_obj *obj)
1631 {
1632         if (obj->defered_free)
1633                 yaffs_free_obj(obj);
1634 }
1635
1636 static int yaffs_generic_obj_del(struct yaffs_obj *in)
1637 {
1638         /* Invalidate the file's data in the cache, without flushing. */
1639         yaffs_invalidate_whole_cache(in);
1640
1641         if (in->my_dev->param.is_yaffs2 && in->parent != in->my_dev->del_dir) {
1642                 /* Move to unlinked directory so we have a deletion record */
1643                 yaffs_change_obj_name(in, in->my_dev->del_dir, _Y("deleted"), 0,
1644                                       0);
1645         }
1646
1647         yaffs_remove_obj_from_dir(in);
1648         yaffs_chunk_del(in->my_dev, in->hdr_chunk, 1, __LINE__);
1649         in->hdr_chunk = 0;
1650
1651         yaffs_free_obj(in);
1652         return YAFFS_OK;
1653
1654 }
1655
1656 static void yaffs_soft_del_file(struct yaffs_obj *obj)
1657 {
1658         if (!obj->deleted ||
1659             obj->variant_type != YAFFS_OBJECT_TYPE_FILE ||
1660             obj->soft_del)
1661                 return;
1662
1663         if (obj->n_data_chunks <= 0) {
1664                 /* Empty file with no duplicate object headers,
1665                  * just delete it immediately */
1666                 yaffs_free_tnode(obj->my_dev, obj->variant.file_variant.top);
1667                 obj->variant.file_variant.top = NULL;
1668                 yaffs_trace(YAFFS_TRACE_TRACING,
1669                         "yaffs: Deleting empty file %d",
1670                         obj->obj_id);
1671                 yaffs_generic_obj_del(obj);
1672         } else {
1673                 yaffs_soft_del_worker(obj,
1674                                       obj->variant.file_variant.top,
1675                                       obj->variant.file_variant.top_level,
1676                                       0);
1677                 obj->soft_del = 1;
1678         }
1679 }
1680
1681 /* Pruning removes any part of the file structure tree that is beyond the
1682  * bounds of the file (ie that does not point to chunks).
1683  *
1684  * A file should only get pruned when its size is reduced.
1685  *
1686  * Before pruning, the chunks must be pulled from the tree and the
1687  * level 0 tnode entries must be zeroed out.
1688  * Could also use this for file deletion, but that's probably better handled
1689  * by a special case.
1690  *
1691  * This function is recursive. For levels > 0 the function is called again on
1692  * any sub-tree. For level == 0 we just check if the sub-tree has data.
1693  * If there is no data in a subtree then it is pruned.
1694  */
1695
1696 static struct yaffs_tnode *yaffs_prune_worker(struct yaffs_dev *dev,
1697                                               struct yaffs_tnode *tn, u32 level,
1698                                               int del0)
1699 {
1700         int i;
1701         int has_data;
1702
1703         if (!tn)
1704                 return tn;
1705
1706         has_data = 0;
1707
1708         if (level > 0) {
1709                 for (i = 0; i < YAFFS_NTNODES_INTERNAL; i++) {
1710                         if (tn->internal[i]) {
1711                                 tn->internal[i] =
1712                                     yaffs_prune_worker(dev,
1713                                                 tn->internal[i],
1714                                                 level - 1,
1715                                                 (i == 0) ? del0 : 1);
1716                         }
1717
1718                         if (tn->internal[i])
1719                                 has_data++;
1720                 }
1721         } else {
1722                 int tnode_size_u32 = dev->tnode_size / sizeof(u32);
1723                 u32 *map = (u32 *) tn;
1724
1725                 for (i = 0; !has_data && i < tnode_size_u32; i++) {
1726                         if (map[i])
1727                                 has_data++;
1728                 }
1729         }
1730
1731         if (has_data == 0 && del0) {
1732                 /* Free and return NULL */
1733                 yaffs_free_tnode(dev, tn);
1734                 tn = NULL;
1735         }
1736         return tn;
1737 }
1738
1739 static int yaffs_prune_tree(struct yaffs_dev *dev,
1740                             struct yaffs_file_var *file_struct)
1741 {
1742         int i;
1743         int has_data;
1744         int done = 0;
1745         struct yaffs_tnode *tn;
1746
1747         if (file_struct->top_level < 1)
1748                 return YAFFS_OK;
1749
1750         file_struct->top =
1751            yaffs_prune_worker(dev, file_struct->top, file_struct->top_level, 0);
1752
1753         /* Now we have a tree with all the empty branches NULL but
1754          * the height is the same as it was.
1755          * Let's see if we can trim internal tnodes to shorten the tree.
1756          * We can do this if only the 0th element in the tnode is in use
1757          * (ie all the elements at non-zero indices are NULL).
1758          */
1759
1760         while (file_struct->top_level && !done) {
1761                 tn = file_struct->top;
1762
1763                 has_data = 0;
1764                 for (i = 1; i < YAFFS_NTNODES_INTERNAL; i++) {
1765                         if (tn->internal[i])
1766                                 has_data++;
1767                 }
1768
1769                 if (!has_data) {
1770                         file_struct->top = tn->internal[0];
1771                         file_struct->top_level--;
1772                         yaffs_free_tnode(dev, tn);
1773                 } else {
1774                         done = 1;
1775                 }
1776         }
1777
1778         return YAFFS_OK;
1779 }
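
/*
 * Worked example of the trimming loop above: suppose a file has been
 * truncated so that, after yaffs_prune_worker(), only element 0 of the top
 * tnode still points anywhere:
 *
 *	top->internal[0] = <level 2 tnode>, internal[1..] = NULL
 *
 * has_data stays 0, so top becomes top->internal[0], top_level drops by one
 * and the old top tnode is freed.  This repeats until some level has data
 * outside element 0 (or top_level reaches 0), so the tree height shrinks to
 * match the shrunken file.
 */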
1780
1781 /*-------------------- End of File Structure functions.-------------------*/
1782
1783 /* alloc_empty_obj gets us a clean Object. */
1784 static struct yaffs_obj *yaffs_alloc_empty_obj(struct yaffs_dev *dev)
1785 {
1786         struct yaffs_obj *obj = yaffs_alloc_raw_obj(dev);
1787
1788         if (!obj)
1789                 return obj;
1790
1791         dev->n_obj++;
1792
1793         /* Now sweeten it up... */
1794
1795         memset(obj, 0, sizeof(struct yaffs_obj));
1796         obj->being_created = 1;
1797
1798         obj->my_dev = dev;
1799         obj->hdr_chunk = 0;
1800         obj->variant_type = YAFFS_OBJECT_TYPE_UNKNOWN;
1801         INIT_LIST_HEAD(&(obj->hard_links));
1802         INIT_LIST_HEAD(&(obj->hash_link));
1803         INIT_LIST_HEAD(&obj->siblings);
1804
1805         /* Now make the directory sane */
1806         if (dev->root_dir) {
1807                 obj->parent = dev->root_dir;
1808                 list_add(&(obj->siblings),
1809                          &dev->root_dir->variant.dir_variant.children);
1810         }
1811
1812         /* Add it to the lost and found directory.
1813          * NB Can't put root or lost-n-found in lost-n-found so
1814          * check if lost-n-found exists first
1815          */
1816         if (dev->lost_n_found)
1817                 yaffs_add_obj_to_dir(dev->lost_n_found, obj);
1818
1819         obj->being_created = 0;
1820
1821         dev->checkpoint_blocks_required = 0;    /* force recalculation */
1822
1823         return obj;
1824 }
1825
1826 static int yaffs_find_nice_bucket(struct yaffs_dev *dev)
1827 {
1828         int i;
1829         int l = 999;
1830         int lowest = 999999;
1831
1832         /* Search for the shortest list or one that
1833          * isn't too long.
1834          */
1835
1836         for (i = 0; i < 10 && lowest > 4; i++) {
1837                 dev->bucket_finder++;
1838                 dev->bucket_finder %= YAFFS_NOBJECT_BUCKETS;
1839                 if (dev->obj_bucket[dev->bucket_finder].count < lowest) {
1840                         lowest = dev->obj_bucket[dev->bucket_finder].count;
1841                         l = dev->bucket_finder;
1842                 }
1843         }
1844
1845         return l;
1846 }
1847
1848 static int yaffs_new_obj_id(struct yaffs_dev *dev)
1849 {
1850         int bucket = yaffs_find_nice_bucket(dev);
1851         int found = 0;
1852         struct list_head *i;
1853         u32 n = (u32) bucket;
1854
1855         /* Now find an object id that has not already been taken
1856          * by scanning the list.
1857          */
1858
1859         while (!found) {
1860                 found = 1;
1861                 n += YAFFS_NOBJECT_BUCKETS;
1862                 if (1 || dev->obj_bucket[bucket].count > 0) {
1863                         list_for_each(i, &dev->obj_bucket[bucket].list) {
1864                                 /* If there is already one in the list */
1865                                 if (i && list_entry(i, struct yaffs_obj,
1866                                                     hash_link)->obj_id == n) {
1867                                         found = 0;
1868                                 }
1869                         }
1870                 }
1871         }
1872         return n;
1873 }
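
/*
 * The candidate ids above are bucket + k * YAFFS_NOBJECT_BUCKETS, so every
 * candidate hashes back into the bucket that is being scanned.  As a worked
 * example, assuming yaffs_hash_fn() is obj_id % YAFFS_NOBJECT_BUCKETS and
 * YAFFS_NOBJECT_BUCKETS is 256: starting from bucket 7 the ids tried are
 * 263, 519, 775, ... and 263 % 256 == 7, so only one bucket's list ever has
 * to be searched for collisions.
 */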
1874
1875 static void yaffs_hash_obj(struct yaffs_obj *in)
1876 {
1877         int bucket = yaffs_hash_fn(in->obj_id);
1878         struct yaffs_dev *dev = in->my_dev;
1879
1880         list_add(&in->hash_link, &dev->obj_bucket[bucket].list);
1881         dev->obj_bucket[bucket].count++;
1882 }
1883
1884 struct yaffs_obj *yaffs_find_by_number(struct yaffs_dev *dev, u32 number)
1885 {
1886         int bucket = yaffs_hash_fn(number);
1887         struct list_head *i;
1888         struct yaffs_obj *in;
1889
1890         list_for_each(i, &dev->obj_bucket[bucket].list) {
1891                 /* Look if it is in the list */
1892                 in = list_entry(i, struct yaffs_obj, hash_link);
1893                 if (in->obj_id == number) {
1894                         /* Don't return it if it is awaiting a deferred free */
1895                         if (in->defered_free)
1896                                 return NULL;
1897                         return in;
1898                 }
1899         }
1900
1901         return NULL;
1902 }
1903
1904 static struct yaffs_obj *yaffs_new_obj(struct yaffs_dev *dev, int number,
1905                                 enum yaffs_obj_type type)
1906 {
1907         struct yaffs_obj *the_obj = NULL;
1908         struct yaffs_tnode *tn = NULL;
1909
1910         if (number < 0)
1911                 number = yaffs_new_obj_id(dev);
1912
1913         if (type == YAFFS_OBJECT_TYPE_FILE) {
1914                 tn = yaffs_get_tnode(dev);
1915                 if (!tn)
1916                         return NULL;
1917         }
1918
1919         the_obj = yaffs_alloc_empty_obj(dev);
1920         if (!the_obj) {
1921                 if (tn)
1922                         yaffs_free_tnode(dev, tn);
1923                 return NULL;
1924         }
1925
1926         the_obj->fake = 0;
1927         the_obj->rename_allowed = 1;
1928         the_obj->unlink_allowed = 1;
1929         the_obj->obj_id = number;
1930         yaffs_hash_obj(the_obj);
1931         the_obj->variant_type = type;
1932         yaffs_load_current_time(the_obj, 1, 1);
1933
1934         switch (type) {
1935         case YAFFS_OBJECT_TYPE_FILE:
1936                 the_obj->variant.file_variant.file_size = 0;
1937                 the_obj->variant.file_variant.scanned_size = 0;
1938                 the_obj->variant.file_variant.shrink_size =
1939                                                 yaffs_max_file_size(dev);
1940                 the_obj->variant.file_variant.top_level = 0;
1941                 the_obj->variant.file_variant.top = tn;
1942                 break;
1943         case YAFFS_OBJECT_TYPE_DIRECTORY:
1944                 INIT_LIST_HEAD(&the_obj->variant.dir_variant.children);
1945                 INIT_LIST_HEAD(&the_obj->variant.dir_variant.dirty);
1946                 break;
1947         case YAFFS_OBJECT_TYPE_SYMLINK:
1948         case YAFFS_OBJECT_TYPE_HARDLINK:
1949         case YAFFS_OBJECT_TYPE_SPECIAL:
1950                 /* No action required */
1951                 break;
1952         case YAFFS_OBJECT_TYPE_UNKNOWN:
1953                 /* todo this should not happen */
1954                 break;
1955         }
1956         return the_obj;
1957 }
1958
1959 static struct yaffs_obj *yaffs_create_fake_dir(struct yaffs_dev *dev,
1960                                                int number, u32 mode)
1961 {
1962
1963         struct yaffs_obj *obj =
1964             yaffs_new_obj(dev, number, YAFFS_OBJECT_TYPE_DIRECTORY);
1965
1966         if (!obj)
1967                 return NULL;
1968
1969         obj->fake = 1;  /* it is fake so it might not use NAND */
1970         obj->rename_allowed = 0;
1971         obj->unlink_allowed = 0;
1972         obj->deleted = 0;
1973         obj->unlinked = 0;
1974         obj->yst_mode = mode;
1975         obj->my_dev = dev;
1976         obj->hdr_chunk = 0;     /* Not a valid chunk. */
1977         return obj;
1978
1979 }
1980
1981
1982 static void yaffs_init_tnodes_and_objs(struct yaffs_dev *dev)
1983 {
1984         int i;
1985
1986         dev->n_obj = 0;
1987         dev->n_tnodes = 0;
1988         yaffs_init_raw_tnodes_and_objs(dev);
1989
1990         for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) {
1991                 INIT_LIST_HEAD(&dev->obj_bucket[i].list);
1992                 dev->obj_bucket[i].count = 0;
1993         }
1994 }
1995
1996 struct yaffs_obj *yaffs_find_or_create_by_number(struct yaffs_dev *dev,
1997                                                  int number,
1998                                                  enum yaffs_obj_type type)
1999 {
2000         struct yaffs_obj *the_obj = NULL;
2001
2002         if (number > 0)
2003                 the_obj = yaffs_find_by_number(dev, number);
2004
2005         if (!the_obj)
2006                 the_obj = yaffs_new_obj(dev, number, type);
2007
2008         return the_obj;
2009
2010 }
2011
2012 YCHAR *yaffs_clone_str(const YCHAR *str)
2013 {
2014         YCHAR *new_str = NULL;
2015         int len;
2016
2017         if (!str)
2018                 str = _Y("");
2019
2020         len = strnlen(str, YAFFS_MAX_ALIAS_LENGTH);
2021         new_str = kmalloc((len + 1) * sizeof(YCHAR), GFP_NOFS);
2022         if (new_str) {
2023                 strncpy(new_str, str, len);
2024                 new_str[len] = 0;
2025         }
2026         return new_str;
2027
2028 }
2029 /*
2030  * yaffs_update_parent() handles fixing a directory's mtime and ctime when a new
2031  * link (ie. name) is created or deleted in the directory.
2032  *
2033  * ie.
2034  *   create dir/a : update dir's mtime/ctime
2035  *   rm dir/a:     update dir's mtime/ctime
2036  *   modify dir/a: don't update dir's mtime/ctime
2037  *
2038  * This can be handled immediately or deferred. Deferring helps reduce the number
2039  * of updates when many files in a directory are changed within a brief period.
2040  *
2041  * If the directory updating is deferred then yaffs_update_dirty_dirs must be
2042  * called periodically.
2043  */
2044
2045 static void yaffs_update_parent(struct yaffs_obj *obj)
2046 {
2047         struct yaffs_dev *dev;
2048
2049         if (!obj)
2050                 return;
2051         dev = obj->my_dev;
2052         obj->dirty = 1;
2053         yaffs_load_current_time(obj, 0, 1);
2054         if (dev->param.defered_dir_update) {
2055                 struct list_head *link = &obj->variant.dir_variant.dirty;
2056
2057                 if (list_empty(link)) {
2058                         list_add(link, &dev->dirty_dirs);
2059                         yaffs_trace(YAFFS_TRACE_BACKGROUND,
2060                           "Added object %d to dirty directories",
2061                            obj->obj_id);
2062                 }
2063
2064         } else {
2065                 yaffs_update_oh(obj, NULL, 0, 0, 0, NULL);
2066         }
2067 }
2068
2069 void yaffs_update_dirty_dirs(struct yaffs_dev *dev)
2070 {
2071         struct list_head *link;
2072         struct yaffs_obj *obj;
2073         struct yaffs_dir_var *d_s;
2074         union yaffs_obj_var *o_v;
2075
2076         yaffs_trace(YAFFS_TRACE_BACKGROUND, "Update dirty directories");
2077
2078         while (!list_empty(&dev->dirty_dirs)) {
2079                 link = dev->dirty_dirs.next;
2080                 list_del_init(link);
2081
2082                 d_s = list_entry(link, struct yaffs_dir_var, dirty);
2083                 o_v = list_entry(d_s, union yaffs_obj_var, dir_variant);
2084                 obj = list_entry(o_v, struct yaffs_obj, variant);
2085
2086                 yaffs_trace(YAFFS_TRACE_BACKGROUND, "Update directory %d",
2087                         obj->obj_id);
2088
2089                 if (obj->dirty)
2090                         yaffs_update_oh(obj, NULL, 0, 0, 0, NULL);
2091         }
2092 }
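
/*
 * A minimal sketch of how the deferred path is meant to be driven, assuming
 * the OS glue has set param.defered_dir_update: some periodic context
 * (typically the background thread) must call
 *
 *	yaffs_update_dirty_dirs(dev);
 *
 * from time to time so that queued directories eventually get their object
 * headers written.  Without the deferred option each name creation or
 * deletion writes the parent's header immediately via yaffs_update_oh().
 */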
2093
2094 /*
2095  * Mknod (create) a new object.
2096  * equiv_obj only has meaning for a hard link;
2097  * alias_str only has meaning for a symlink.
2098  * rdev only has meaning for devices (a subset of special objects)
2099  */
2100
2101 static struct yaffs_obj *yaffs_create_obj(enum yaffs_obj_type type,
2102                                           struct yaffs_obj *parent,
2103                                           const YCHAR *name,
2104                                           u32 mode,
2105                                           u32 uid,
2106                                           u32 gid,
2107                                           struct yaffs_obj *equiv_obj,
2108                                           const YCHAR *alias_str, u32 rdev)
2109 {
2110         struct yaffs_obj *in;
2111         YCHAR *str = NULL;
2112         struct yaffs_dev *dev = parent->my_dev;
2113
2114         /* Check if the entry exists.
2115          * If it does then fail the call since we don't want a dup. */
2116         if (yaffs_find_by_name(parent, name))
2117                 return NULL;
2118
2119         if (type == YAFFS_OBJECT_TYPE_SYMLINK) {
2120                 str = yaffs_clone_str(alias_str);
2121                 if (!str)
2122                         return NULL;
2123         }
2124
2125         in = yaffs_new_obj(dev, -1, type);
2126
2127         if (!in) {
2128                 kfree(str);
2129                 return NULL;
2130         }
2131
2132         in->hdr_chunk = 0;
2133         in->valid = 1;
2134         in->variant_type = type;
2135
2136         in->yst_mode = mode;
2137
2138         yaffs_attribs_init(in, gid, uid, rdev);
2139
2140         in->n_data_chunks = 0;
2141
2142         yaffs_set_obj_name(in, name);
2143         in->dirty = 1;
2144
2145         yaffs_add_obj_to_dir(parent, in);
2146
2147         in->my_dev = parent->my_dev;
2148
2149         switch (type) {
2150         case YAFFS_OBJECT_TYPE_SYMLINK:
2151                 in->variant.symlink_variant.alias = str;
2152                 break;
2153         case YAFFS_OBJECT_TYPE_HARDLINK:
2154                 in->variant.hardlink_variant.equiv_obj = equiv_obj;
2155                 in->variant.hardlink_variant.equiv_id = equiv_obj->obj_id;
2156                 list_add(&in->hard_links, &equiv_obj->hard_links);
2157                 break;
2158         case YAFFS_OBJECT_TYPE_FILE:
2159         case YAFFS_OBJECT_TYPE_DIRECTORY:
2160         case YAFFS_OBJECT_TYPE_SPECIAL:
2161         case YAFFS_OBJECT_TYPE_UNKNOWN:
2162                 /* do nothing */
2163                 break;
2164         }
2165
2166         if (yaffs_update_oh(in, name, 0, 0, 0, NULL) < 0) {
2167                 /* Could not create the object header, fail */
2168                 yaffs_del_obj(in);
2169                 in = NULL;
2170         }
2171
2172         if (in)
2173                 yaffs_update_parent(parent);
2174
2175         return in;
2176 }
2177
2178 struct yaffs_obj *yaffs_create_file(struct yaffs_obj *parent,
2179                                     const YCHAR *name, u32 mode, u32 uid,
2180                                     u32 gid)
2181 {
2182         return yaffs_create_obj(YAFFS_OBJECT_TYPE_FILE, parent, name, mode,
2183                                 uid, gid, NULL, NULL, 0);
2184 }
2185
2186 struct yaffs_obj *yaffs_create_dir(struct yaffs_obj *parent, const YCHAR *name,
2187                                    u32 mode, u32 uid, u32 gid)
2188 {
2189         return yaffs_create_obj(YAFFS_OBJECT_TYPE_DIRECTORY, parent, name,
2190                                 mode, uid, gid, NULL, NULL, 0);
2191 }
2192
2193 struct yaffs_obj *yaffs_create_special(struct yaffs_obj *parent,
2194                                        const YCHAR *name, u32 mode, u32 uid,
2195                                        u32 gid, u32 rdev)
2196 {
2197         return yaffs_create_obj(YAFFS_OBJECT_TYPE_SPECIAL, parent, name, mode,
2198                                 uid, gid, NULL, NULL, rdev);
2199 }
2200
2201 struct yaffs_obj *yaffs_create_symlink(struct yaffs_obj *parent,
2202                                        const YCHAR *name, u32 mode, u32 uid,
2203                                        u32 gid, const YCHAR *alias)
2204 {
2205         return yaffs_create_obj(YAFFS_OBJECT_TYPE_SYMLINK, parent, name, mode,
2206                                 uid, gid, NULL, alias, 0);
2207 }
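
/*
 * A usage sketch for the wrappers above (the names and mode bits are
 * illustrative only):
 *
 *	struct yaffs_obj *dir, *file, *link;
 *
 *	dir  = yaffs_create_dir(dev->root_dir, _Y("logs"), 0755, 0, 0);
 *	file = yaffs_create_file(dir, _Y("latest"), 0644, 0, 0);
 *	link = yaffs_create_symlink(dir, _Y("current"), 0777, 0, 0,
 *				    _Y("latest"));
 *
 * Each call returns NULL if the name already exists in the parent directory
 * or if the object header could not be written.
 */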
2208
2209 /* yaffs_link_obj returns the object id of the equivalent object.*/
2210 struct yaffs_obj *yaffs_link_obj(struct yaffs_obj *parent, const YCHAR * name,
2211                                  struct yaffs_obj *equiv_obj)
2212 {
2213         /* Get the real object in case we were fed a hard link obj */
2214         equiv_obj = yaffs_get_equivalent_obj(equiv_obj);
2215
2216         if (yaffs_create_obj(YAFFS_OBJECT_TYPE_HARDLINK,
2217                         parent, name, 0, 0, 0,
2218                         equiv_obj, NULL, 0))
2219                 return equiv_obj;
2220
2221         return NULL;
2222
2223 }
2224
2225
2226
2227 /*---------------------- Block Management and Page Allocation -------------*/
2228
2229 static void yaffs_deinit_blocks(struct yaffs_dev *dev)
2230 {
2231         if (dev->block_info_alt && dev->block_info)
2232                 vfree(dev->block_info);
2233         else
2234                 kfree(dev->block_info);
2235
2236         dev->block_info_alt = 0;
2237
2238         dev->block_info = NULL;
2239
2240         if (dev->chunk_bits_alt && dev->chunk_bits)
2241                 vfree(dev->chunk_bits);
2242         else
2243                 kfree(dev->chunk_bits);
2244         dev->chunk_bits_alt = 0;
2245         dev->chunk_bits = NULL;
2246 }
2247
2248 static int yaffs_init_blocks(struct yaffs_dev *dev)
2249 {
2250         int n_blocks = dev->internal_end_block - dev->internal_start_block + 1;
2251
2252         dev->block_info = NULL;
2253         dev->chunk_bits = NULL;
2254         dev->alloc_block = -1;  /* force it to get a new one */
2255
2256         /* If the first allocation strategy fails, try the alternate one */
2257         dev->block_info =
2258                 kmalloc(n_blocks * sizeof(struct yaffs_block_info), GFP_NOFS);
2259         if (!dev->block_info) {
2260                 dev->block_info =
2261                     vmalloc(n_blocks * sizeof(struct yaffs_block_info));
2262                 dev->block_info_alt = 1;
2263         } else {
2264                 dev->block_info_alt = 0;
2265         }
2266
2267         if (!dev->block_info)
2268                 goto alloc_error;
2269
2270         /* Set up dynamic blockinfo stuff. Round up bytes. */
2271         dev->chunk_bit_stride = (dev->param.chunks_per_block + 7) / 8;
2272         dev->chunk_bits =
2273                 kmalloc(dev->chunk_bit_stride * n_blocks, GFP_NOFS);
2274         if (!dev->chunk_bits) {
2275                 dev->chunk_bits =
2276                     vmalloc(dev->chunk_bit_stride * n_blocks);
2277                 dev->chunk_bits_alt = 1;
2278         } else {
2279                 dev->chunk_bits_alt = 0;
2280         }
2281         if (!dev->chunk_bits)
2282                 goto alloc_error;
2283
2284
2285         memset(dev->block_info, 0, n_blocks * sizeof(struct yaffs_block_info));
2286         memset(dev->chunk_bits, 0, dev->chunk_bit_stride * n_blocks);
2287         return YAFFS_OK;
2288
2289 alloc_error:
2290         yaffs_deinit_blocks(dev);
2291         return YAFFS_FAIL;
2292 }
2293
2294
2295 void yaffs_block_became_dirty(struct yaffs_dev *dev, int block_no)
2296 {
2297         struct yaffs_block_info *bi = yaffs_get_block_info(dev, block_no);
2298         int erased_ok = 0;
2299         int i;
2300
2301         /* If the block is still healthy, erase it and mark it as clean.
2302          * If the block has had a data failure, then retire it.
2303          */
2304
2305         yaffs_trace(YAFFS_TRACE_GC | YAFFS_TRACE_ERASE,
2306                 "yaffs_block_became_dirty block %d state %d %s",
2307                 block_no, bi->block_state,
2308                 (bi->needs_retiring) ? "needs retiring" : "");
2309
2310         yaffs2_clear_oldest_dirty_seq(dev, bi);
2311
2312         bi->block_state = YAFFS_BLOCK_STATE_DIRTY;
2313
2314         /* If this is the block being garbage collected then stop gc'ing */
2315         if (block_no == dev->gc_block)
2316                 dev->gc_block = 0;
2317
2318         /* If this block is currently the best candidate for gc
2319          * then drop it as a candidate */
2320         if (block_no == dev->gc_dirtiest) {
2321                 dev->gc_dirtiest = 0;
2322                 dev->gc_pages_in_use = 0;
2323         }
2324
2325         if (!bi->needs_retiring) {
2326                 yaffs2_checkpt_invalidate(dev);
2327                 erased_ok = yaffs_erase_block(dev, block_no);
2328                 if (!erased_ok) {
2329                         dev->n_erase_failures++;
2330                         yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
2331                           "**>> Erasure failed %d", block_no);
2332                 }
2333         }
2334
2335         /* Verify erasure if needed */
2336         if (erased_ok &&
2337             ((yaffs_trace_mask & YAFFS_TRACE_ERASE) ||
2338              !yaffs_skip_verification(dev))) {
2339                 for (i = 0; i < dev->param.chunks_per_block; i++) {
2340                         if (!yaffs_check_chunk_erased(dev,
2341                                 block_no * dev->param.chunks_per_block + i)) {
2342                                 yaffs_trace(YAFFS_TRACE_ERROR,
2343                                         ">>Block %d erasure supposedly OK, but chunk %d not erased",
2344                                         block_no, i);
2345                         }
2346                 }
2347         }
2348
2349         if (!erased_ok) {
2350                 /* We lost a block of free space */
2351                 dev->n_free_chunks -= dev->param.chunks_per_block;
2352                 yaffs_retire_block(dev, block_no);
2353                 yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
2354                         "**>> Block %d retired", block_no);
2355                 return;
2356         }
2357
2358         /* Clean it up... */
2359         bi->block_state = YAFFS_BLOCK_STATE_EMPTY;
2360         bi->seq_number = 0;
2361         dev->n_erased_blocks++;
2362         bi->pages_in_use = 0;
2363         bi->soft_del_pages = 0;
2364         bi->has_shrink_hdr = 0;
2365         bi->skip_erased_check = 1;      /* Clean, so no need to check */
2366         bi->gc_prioritise = 0;
2367         bi->has_summary = 0;
2368
2369         yaffs_clear_chunk_bits(dev, block_no);
2370
2371         yaffs_trace(YAFFS_TRACE_ERASE, "Erased block %d", block_no);
2372 }
2373
2374 static inline int yaffs_gc_process_chunk(struct yaffs_dev *dev,
2375                                         struct yaffs_block_info *bi,
2376                                         int old_chunk, u8 *buffer)
2377 {
2378         int new_chunk;
2379         int mark_flash = 1;
2380         struct yaffs_ext_tags tags;
2381         struct yaffs_obj *object;
2382         int matching_chunk;
2383         int ret_val = YAFFS_OK;
2384
2385         memset(&tags, 0, sizeof(tags));
2386         yaffs_rd_chunk_tags_nand(dev, old_chunk,
2387                                  buffer, &tags);
2388         object = yaffs_find_by_number(dev, tags.obj_id);
2389
2390         yaffs_trace(YAFFS_TRACE_GC_DETAIL,
2391                 "Collecting chunk in block %d, %d %d %d ",
2392                 dev->gc_chunk, tags.obj_id,
2393                 tags.chunk_id, tags.n_bytes);
2394
2395         if (object && !yaffs_skip_verification(dev)) {
2396                 if (tags.chunk_id == 0)
2397                         matching_chunk =
2398                             object->hdr_chunk;
2399                 else if (object->soft_del)
2400                         /* Defeat the test */
2401                         matching_chunk = old_chunk;
2402                 else
2403                         matching_chunk =
2404                             yaffs_find_chunk_in_file(object,
2405                                                      tags.chunk_id,
2406                                                      NULL);
2407
2408                 if (old_chunk != matching_chunk)
2409                         yaffs_trace(YAFFS_TRACE_ERROR,
2410                                 "gc: page in gc mismatch: %d %d %d %d",
2411                                 old_chunk,
2412                                 matching_chunk,
2413                                 tags.obj_id,
2414                                 tags.chunk_id);
2415         }
2416
2417         if (!object) {
2418                 yaffs_trace(YAFFS_TRACE_ERROR,
2419                         "page %d in gc has no object: %d %d %d ",
2420                         old_chunk,
2421                         tags.obj_id, tags.chunk_id,
2422                         tags.n_bytes);
2423         }
2424
2425         if (object &&
2426             object->deleted &&
2427             object->soft_del && tags.chunk_id != 0) {
2428                 /* Data chunk in a soft deleted file,
2429                  * throw it away.
2430                  * It's a soft deleted data chunk, so
2431                  * no need to copy this, just forget
2432                  * about it and fix up the object.
2433                  */
2434
2435                 /* Free chunks already includes
2436                  * soft-deleted chunks; however, this
2437                  * chunk is soon going to be really
2438                  * deleted, which will increment free
2439                  * chunks. We have to decrement free
2440                  * chunks so this works out properly.
2441                  */
2442                 dev->n_free_chunks--;
2443                 bi->soft_del_pages--;
2444
2445                 object->n_data_chunks--;
2446                 if (object->n_data_chunks <= 0) {
2447                         /* remember to clean up the obj */
2448                         dev->gc_cleanup_list[dev->n_clean_ups] = tags.obj_id;
2449                         dev->n_clean_ups++;
2450                 }
2451                 mark_flash = 0;
2452         } else if (object) {
2453                 /* It's either a data chunk in a live
2454                  * file or an ObjectHeader, so we're
2455                  * interested in it.
2456                  * NB Need to keep the ObjectHeaders of
2457                  * deleted files until the whole file
2458                  * has been deleted off
2459                  */
2460                 tags.serial_number++;
2461                 dev->n_gc_copies++;
2462
2463                 if (tags.chunk_id == 0) {
2464                         /* It is an object header,
2465                          * so we need to nuke the
2466                          * shrink-header flags since
2467                          * their work is done.
2468                          * Also need to clean up
2469                          * shadowing.
2470                          */
2471                         struct yaffs_obj_hdr *oh;
2472                         oh = (struct yaffs_obj_hdr *) buffer;
2473
2474                         oh->is_shrink = 0;
2475                         tags.extra_is_shrink = 0;
2476                         oh->shadows_obj = 0;
2477                         oh->inband_shadowed_obj_id = 0;
2478                         tags.extra_shadows = 0;
2479
2480                         /* Update file size */
2481                         if (object->variant_type == YAFFS_OBJECT_TYPE_FILE) {
2482                                 yaffs_oh_size_load(oh,
2483                                     object->variant.file_variant.file_size);
2484                                 tags.extra_file_size =
2485                                     object->variant.file_variant.file_size;
2486                         }
2487
2488                         yaffs_verify_oh(object, oh, &tags, 1);
2489                         new_chunk =
2490                             yaffs_write_new_chunk(dev, (u8 *) oh, &tags, 1);
2491                 } else {
2492                         new_chunk =
2493                             yaffs_write_new_chunk(dev, buffer, &tags, 1);
2494                 }
2495
2496                 if (new_chunk < 0) {
2497                         ret_val = YAFFS_FAIL;
2498                 } else {
2499
2500                         /* Now fix up the Tnodes etc. */
2501
2502                         if (tags.chunk_id == 0) {
2503                                 /* It's a header */
2504                                 object->hdr_chunk = new_chunk;
2505                                 object->serial = tags.serial_number;
2506                         } else {
2507                                 /* It's a data chunk */
2508                                 yaffs_put_chunk_in_file(object, tags.chunk_id,
2509                                                         new_chunk, 0);
2510                         }
2511                 }
2512         }
2513         if (ret_val == YAFFS_OK)
2514                 yaffs_chunk_del(dev, old_chunk, mark_flash, __LINE__);
2515         return ret_val;
2516 }
2517
2518 static int yaffs_gc_block(struct yaffs_dev *dev, int block, int whole_block)
2519 {
2520         int old_chunk;
2521         int ret_val = YAFFS_OK;
2522         int i;
2523         int is_checkpt_block;
2524         int max_copies;
2525         int chunks_before = yaffs_get_erased_chunks(dev);
2526         int chunks_after;
2527         struct yaffs_block_info *bi = yaffs_get_block_info(dev, block);
2528
2529         is_checkpt_block = (bi->block_state == YAFFS_BLOCK_STATE_CHECKPOINT);
2530
2531         yaffs_trace(YAFFS_TRACE_TRACING,
2532                 "Collecting block %d, in use %d, shrink %d, whole_block %d",
2533                 block, bi->pages_in_use, bi->has_shrink_hdr,
2534                 whole_block);
2535
2536         /*yaffs_verify_free_chunks(dev); */
2537
2538         if (bi->block_state == YAFFS_BLOCK_STATE_FULL)
2539                 bi->block_state = YAFFS_BLOCK_STATE_COLLECTING;
2540
2541         bi->has_shrink_hdr = 0; /* clear the flag so that the block can be erased */
2542
2543         dev->gc_disable = 1;
2544
2545         yaffs_summary_gc(dev, block);
2546
2547         if (is_checkpt_block || !yaffs_still_some_chunks(dev, block)) {
2548                 yaffs_trace(YAFFS_TRACE_TRACING,
2549                         "Collecting block %d that has no chunks in use",
2550                         block);
2551                 yaffs_block_became_dirty(dev, block);
2552         } else {
2553
2554                 u8 *buffer = yaffs_get_temp_buffer(dev);
2555
2556                 yaffs_verify_blk(dev, bi, block);
2557
2558                 max_copies = (whole_block) ? dev->param.chunks_per_block : 5;
2559                 old_chunk = block * dev->param.chunks_per_block + dev->gc_chunk;
2560
2561                 for (/* init already done */ ;
2562                      ret_val == YAFFS_OK &&
2563                      dev->gc_chunk < dev->param.chunks_per_block &&
2564                      (bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) &&
2565                      max_copies > 0;
2566                      dev->gc_chunk++, old_chunk++) {
2567                         if (yaffs_check_chunk_bit(dev, block, dev->gc_chunk)) {
2568                                 /* Page is in use and might need to be copied */
2569                                 max_copies--;
2570                                 ret_val = yaffs_gc_process_chunk(dev, bi,
2571                                                         old_chunk, buffer);
2572                         }
2573                 }
2574                 yaffs_release_temp_buffer(dev, buffer);
2575         }
2576
2577         yaffs_verify_collected_blk(dev, bi, block);
2578
2579         if (bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) {
2580                 /*
2581                  * The gc did not complete. Set block state back to FULL
2582                  * because checkpointing does not restore gc.
2583                  */
2584                 bi->block_state = YAFFS_BLOCK_STATE_FULL;
2585         } else {
2586                 /* The gc completed. */
2587                 /* Do any required cleanups */
2588                 for (i = 0; i < dev->n_clean_ups; i++) {
2589                         /* Time to delete the file too */
2590                         struct yaffs_obj *object =
2591                             yaffs_find_by_number(dev, dev->gc_cleanup_list[i]);
2592                         if (object) {
2593                                 yaffs_free_tnode(dev,
2594                                           object->variant.file_variant.top);
2595                                 object->variant.file_variant.top = NULL;
2596                                 yaffs_trace(YAFFS_TRACE_GC,
2597                                         "yaffs: About to finally delete object %d",
2598                                         object->obj_id);
2599                                 yaffs_generic_obj_del(object);
2600                                 object->my_dev->n_deleted_files--;
2601                         }
2602
2603                 }
2604                 chunks_after = yaffs_get_erased_chunks(dev);
2605                 if (chunks_before >= chunks_after)
2606                         yaffs_trace(YAFFS_TRACE_GC,
2607                                 "gc did not increase free chunks before %d after %d",
2608                                 chunks_before, chunks_after);
2609                 dev->gc_block = 0;
2610                 dev->gc_chunk = 0;
2611                 dev->n_clean_ups = 0;
2612         }
2613
2614         dev->gc_disable = 0;
2615
2616         return ret_val;
2617 }
2618
2619 /*
2620  * find_gc_block() selects the dirtiest block (or close enough)
2621  * for garbage collection.
2622  */
2623
2624 static unsigned yaffs_find_gc_block(struct yaffs_dev *dev,
2625                                     int aggressive, int background)
2626 {
2627         int i;
2628         int iterations;
2629         unsigned selected = 0;
2630         int prioritised = 0;
2631         int prioritised_exist = 0;
2632         struct yaffs_block_info *bi;
2633         int threshold;
2634
2635         /* First let's see if we need to grab a prioritised block */
2636         if (dev->has_pending_prioritised_gc && !aggressive) {
2637                 dev->gc_dirtiest = 0;
2638                 bi = dev->block_info;
2639                 for (i = dev->internal_start_block;
2640                      i <= dev->internal_end_block && !selected; i++) {
2641
2642                         if (bi->gc_prioritise) {
2643                                 prioritised_exist = 1;
2644                                 if (bi->block_state == YAFFS_BLOCK_STATE_FULL &&
2645                                     yaffs_block_ok_for_gc(dev, bi)) {
2646                                         selected = i;
2647                                         prioritised = 1;
2648                                 }
2649                         }
2650                         bi++;
2651                 }
2652
2653                 /*
2654                  * If there is a prioritised block and none was selected then
2655                  * this happened because there is at least one old dirty block
2656                  * gumming up the works. Let's gc the oldest dirty block.
2657                  */
2658
2659                 if (prioritised_exist &&
2660                     !selected && dev->oldest_dirty_block > 0)
2661                         selected = dev->oldest_dirty_block;
2662
2663                 if (!prioritised_exist) /* None found, so we can clear this */
2664                         dev->has_pending_prioritised_gc = 0;
2665         }
2666
2667         /* If we're doing aggressive GC then we are happy to take a less-dirty
2668          * block, and search harder.
2669          * Otherwise (leisurely gc) we only bother to do this if the
2670          * block has only a few pages in use.
2671          */
2672
2673         if (!selected) {
2674                 int pages_used;
2675                 int n_blocks =
2676                     dev->internal_end_block - dev->internal_start_block + 1;
2677                 if (aggressive) {
2678                         threshold = dev->param.chunks_per_block;
2679                         iterations = n_blocks;
2680                 } else {
2681                         int max_threshold;
2682
2683                         if (background)
2684                                 max_threshold = dev->param.chunks_per_block / 2;
2685                         else
2686                                 max_threshold = dev->param.chunks_per_block / 8;
2687
2688                         if (max_threshold < YAFFS_GC_PASSIVE_THRESHOLD)
2689                                 max_threshold = YAFFS_GC_PASSIVE_THRESHOLD;
2690
2691                         threshold = background ? (dev->gc_not_done + 2) * 2 : 0;
2692                         if (threshold < YAFFS_GC_PASSIVE_THRESHOLD)
2693                                 threshold = YAFFS_GC_PASSIVE_THRESHOLD;
2694                         if (threshold > max_threshold)
2695                                 threshold = max_threshold;
2696
2697                         iterations = n_blocks / 16 + 1;
2698                         if (iterations > 100)
2699                                 iterations = 100;
2700                 }
2701
2702                 for (i = 0;
2703                      i < iterations &&
2704                      (dev->gc_dirtiest < 1 ||
2705                       dev->gc_pages_in_use > YAFFS_GC_GOOD_ENOUGH);
2706                      i++) {
2707                         dev->gc_block_finder++;
2708                         if (dev->gc_block_finder < dev->internal_start_block ||
2709                             dev->gc_block_finder > dev->internal_end_block)
2710                                 dev->gc_block_finder =
2711                                     dev->internal_start_block;
2712
2713                         bi = yaffs_get_block_info(dev, dev->gc_block_finder);
2714
2715                         pages_used = bi->pages_in_use - bi->soft_del_pages;
2716
2717                         if (bi->block_state == YAFFS_BLOCK_STATE_FULL &&
2718                             pages_used < dev->param.chunks_per_block &&
2719                             (dev->gc_dirtiest < 1 ||
2720                              pages_used < dev->gc_pages_in_use) &&
2721                             yaffs_block_ok_for_gc(dev, bi)) {
2722                                 dev->gc_dirtiest = dev->gc_block_finder;
2723                                 dev->gc_pages_in_use = pages_used;
2724                         }
2725                 }
2726
2727                 if (dev->gc_dirtiest > 0 && dev->gc_pages_in_use <= threshold)
2728                         selected = dev->gc_dirtiest;
2729         }
2730
2731         /*
2732          * If nothing has been selected for a while, try the oldest dirty
2733          * because that's gumming up the works.
2734          */
2735
2736         if (!selected && dev->param.is_yaffs2 &&
2737             dev->gc_not_done >= (background ? 10 : 20)) {
2738                 yaffs2_find_oldest_dirty_seq(dev);
2739                 if (dev->oldest_dirty_block > 0) {
2740                         selected = dev->oldest_dirty_block;
2741                         dev->gc_dirtiest = selected;
2742                         dev->oldest_dirty_gc_count++;
2743                         bi = yaffs_get_block_info(dev, selected);
2744                         dev->gc_pages_in_use =
2745                             bi->pages_in_use - bi->soft_del_pages;
2746                 } else {
2747                         dev->gc_not_done = 0;
2748                 }
2749         }
2750
2751         if (selected) {
2752                 yaffs_trace(YAFFS_TRACE_GC,
2753                         "GC Selected block %d with %d free, prioritised:%d",
2754                         selected,
2755                         dev->param.chunks_per_block - dev->gc_pages_in_use,
2756                         prioritised);
2757
2758                 dev->n_gc_blocks++;
2759                 if (background)
2760                         dev->bg_gcs++;
2761
2762                 dev->gc_dirtiest = 0;
2763                 dev->gc_pages_in_use = 0;
2764                 dev->gc_not_done = 0;
2765                 if (dev->refresh_skip > 0)
2766                         dev->refresh_skip--;
2767         } else {
2768                 dev->gc_not_done++;
2769                 yaffs_trace(YAFFS_TRACE_GC,
2770                         "GC none: finder %d skip %d threshold %d dirtiest %d using %d oldest %d%s",
2771                         dev->gc_block_finder, dev->gc_not_done, threshold,
2772                         dev->gc_dirtiest, dev->gc_pages_in_use,
2773                         dev->oldest_dirty_block, background ? " bg" : "");
2774         }
2775
2776         return selected;
2777 }
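
/*
 * Worked example of the passive threshold above, assuming chunks_per_block
 * is 64: a background pass with gc_not_done == 3 gives
 * threshold = (3 + 2) * 2 = 10 (clamped between YAFFS_GC_PASSIVE_THRESHOLD
 * and 64 / 2), so a block is only selected once it has 10 or fewer live
 * pages.  Every pass that selects nothing increments gc_not_done and hence
 * the threshold, so a block is eventually collected even on a fairly full
 * device.
 */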
2778
2779 /* New garbage collector
2780  * If we're very low on erased blocks then we do aggressive garbage collection
2781  * otherwise we do "leisurely" garbage collection.
2782  * Aggressive gc looks further (whole array) and will accept less-dirty blocks.
2783  * Passive gc only inspects smaller areas and only accepts dirtier blocks.
2784  *
2785  * The idea is to help clear out space in a more spread-out manner.
2786  * Dunno if it really does anything useful.
2787  */
2788 static int yaffs_check_gc(struct yaffs_dev *dev, int background)
2789 {
2790         int aggressive = 0;
2791         int gc_ok = YAFFS_OK;
2792         int max_tries = 0;
2793         int min_erased;
2794         int erased_chunks;
2795         int checkpt_block_adjust;
2796
2797         if (dev->param.gc_control && (dev->param.gc_control(dev) & 1) == 0)
2798                 return YAFFS_OK;
2799
2800         if (dev->gc_disable)
2801                 /* Bail out so we don't get recursive gc */
2802                 return YAFFS_OK;
2803
2804         /* This loop should pass the first time.
2805          * Only loops here if the collection does not increase space.
2806          */
2807
2808         do {
2809                 max_tries++;
2810
2811                 checkpt_block_adjust = yaffs_calc_checkpt_blocks_required(dev);
2812
2813                 min_erased =
2814                     dev->param.n_reserved_blocks + checkpt_block_adjust + 1;
2815                 erased_chunks =
2816                     dev->n_erased_blocks * dev->param.chunks_per_block;
2817
2818                 /* If we need a block soon then do aggressive gc. */
2819                 if (dev->n_erased_blocks < min_erased)
2820                         aggressive = 1;
2821                 else {
2822                         if (!background
2823                             && erased_chunks > (dev->n_free_chunks / 4))
2824                                 break;
2825
2826                         if (dev->gc_skip > 20)
2827                                 dev->gc_skip = 20;
2828                         if (erased_chunks < dev->n_free_chunks / 2 ||
2829                             dev->gc_skip < 1 || background)
2830                                 aggressive = 0;
2831                         else {
2832                                 dev->gc_skip--;
2833                                 break;
2834                         }
2835                 }
2836
2837                 dev->gc_skip = 5;
2838
2839                 /* If we don't already have a block being gc'd then see if we
2840                  * should start one */
2841
2842                 if (dev->gc_block < 1 && !aggressive) {
2843                         dev->gc_block = yaffs2_find_refresh_block(dev);
2844                         dev->gc_chunk = 0;
2845                         dev->n_clean_ups = 0;
2846                 }
2847                 if (dev->gc_block < 1) {
2848                         dev->gc_block =
2849                             yaffs_find_gc_block(dev, aggressive, background);
2850                         dev->gc_chunk = 0;
2851                         dev->n_clean_ups = 0;
2852                 }
2853
2854                 if (dev->gc_block > 0) {
2855                         dev->all_gcs++;
2856                         if (!aggressive)
2857                                 dev->passive_gc_count++;
2858
2859                         yaffs_trace(YAFFS_TRACE_GC,
2860                                 "yaffs: GC n_erased_blocks %d aggressive %d",
2861                                 dev->n_erased_blocks, aggressive);
2862
2863                         gc_ok = yaffs_gc_block(dev, dev->gc_block, aggressive);
2864                 }
2865
2866                 if (dev->n_erased_blocks < (dev->param.n_reserved_blocks) &&
2867                     dev->gc_block > 0) {
2868                         yaffs_trace(YAFFS_TRACE_GC,
2869                                 "yaffs: GC !!!no reclaim!!! n_erased_blocks %d after try %d block %d",
2870                                 dev->n_erased_blocks, max_tries,
2871                                 dev->gc_block);
2872                 }
2873         } while ((dev->n_erased_blocks < dev->param.n_reserved_blocks) &&
2874                  (dev->gc_block > 0) && (max_tries < 2));
2875
2876         return aggressive ? gc_ok : YAFFS_OK;
2877 }
2878
2879 /*
2880  * yaffs_bg_gc()
2881  * Garbage collects. Intended to be called from a background thread.
2882  * Returns non-zero if more than half the free chunks are erased.
2883  */
2884 int yaffs_bg_gc(struct yaffs_dev *dev, unsigned urgency)
2885 {
2886         int erased_chunks = dev->n_erased_blocks * dev->param.chunks_per_block;
2887
2888         yaffs_trace(YAFFS_TRACE_BACKGROUND, "Background gc %u", urgency);
2889
2890         yaffs_check_gc(dev, 1);
2891         return erased_chunks > dev->n_free_chunks / 2;
2892 }
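
/*
 * A minimal sketch of a caller; the actual background thread lives in the
 * OS glue, not in this file, and sleep_a_while(), LONG_DELAY and SHORT_DELAY
 * are purely illustrative names:
 *
 *	for (;;) {
 *		int enough_erased = yaffs_bg_gc(dev, urgency);
 *
 *		sleep_a_while(enough_erased ? LONG_DELAY : SHORT_DELAY);
 *	}
 *
 * A non-zero return means more than half the free chunks are already erased,
 * so the caller can back off.
 */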
2893
2894 /*-------------------- Data file manipulation -----------------*/
2895
2896 static int yaffs_rd_data_obj(struct yaffs_obj *in, int inode_chunk, u8 * buffer)
2897 {
2898         int nand_chunk = yaffs_find_chunk_in_file(in, inode_chunk, NULL);
2899
2900         if (nand_chunk >= 0)
2901                 return yaffs_rd_chunk_tags_nand(in->my_dev, nand_chunk,
2902                                                 buffer, NULL);
2903         else {
2904                 yaffs_trace(YAFFS_TRACE_NANDACCESS,
2905                         "Chunk %d not found zero instead",
2906                         nand_chunk);
2907                 /* get sane (zero) data if you read a hole */
2908                 memset(buffer, 0, in->my_dev->data_bytes_per_chunk);
2909                 return 0;
2910         }
2911
2912 }
2913
2914 void yaffs_chunk_del(struct yaffs_dev *dev, int chunk_id, int mark_flash,
2915                      int lyn)
2916 {
2917         int block;
2918         int page;
2919         struct yaffs_ext_tags tags;
2920         struct yaffs_block_info *bi;
2921
2922         if (chunk_id <= 0)
2923                 return;
2924
2925         dev->n_deletions++;
2926         block = chunk_id / dev->param.chunks_per_block;
2927         page = chunk_id % dev->param.chunks_per_block;
2928
2929         if (!yaffs_check_chunk_bit(dev, block, page))
2930                 yaffs_trace(YAFFS_TRACE_VERIFY,
2931                         "Deleting invalid chunk %d", chunk_id);
2932
2933         bi = yaffs_get_block_info(dev, block);
2934
2935         yaffs2_update_oldest_dirty_seq(dev, block, bi);
2936
2937         yaffs_trace(YAFFS_TRACE_DELETION,
2938                 "line %d delete of chunk %d",
2939                 lyn, chunk_id);
2940
2941         if (!dev->param.is_yaffs2 && mark_flash &&
2942             bi->block_state != YAFFS_BLOCK_STATE_COLLECTING) {
2943
2944                 memset(&tags, 0, sizeof(tags));
2945                 tags.is_deleted = 1;
2946                 yaffs_wr_chunk_tags_nand(dev, chunk_id, NULL, &tags);
2947                 yaffs_handle_chunk_update(dev, chunk_id, &tags);
2948         } else {
2949                 dev->n_unmarked_deletions++;
2950         }
2951
2952         /* Pull out of the management area.
2953          * If the whole block became dirty, this will kick off an erasure.
2954          */
2955         if (bi->block_state == YAFFS_BLOCK_STATE_ALLOCATING ||
2956             bi->block_state == YAFFS_BLOCK_STATE_FULL ||
2957             bi->block_state == YAFFS_BLOCK_STATE_NEEDS_SCAN ||
2958             bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) {
2959                 dev->n_free_chunks++;
2960                 yaffs_clear_chunk_bit(dev, block, page);
2961                 bi->pages_in_use--;
2962
2963                 if (bi->pages_in_use == 0 &&
2964                     !bi->has_shrink_hdr &&
2965                     bi->block_state != YAFFS_BLOCK_STATE_ALLOCATING &&
2966                     bi->block_state != YAFFS_BLOCK_STATE_NEEDS_SCAN) {
2967                         yaffs_block_became_dirty(dev, block);
2968                 }
2969         }
2970 }
2971
2972 static int yaffs_wr_data_obj(struct yaffs_obj *in, int inode_chunk,
2973                              const u8 *buffer, int n_bytes, int use_reserve)
2974 {
2975         /* Find old chunk. Need to do this to get serial number
2976          * Write new one and patch into tree.
2977          * Invalidate old tags.
2978          */
2979
2980         int prev_chunk_id;
2981         struct yaffs_ext_tags prev_tags;
2982         int new_chunk_id;
2983         struct yaffs_ext_tags new_tags;
2984         struct yaffs_dev *dev = in->my_dev;
2985
2986         yaffs_check_gc(dev, 0);
2987
2988         /* Get the previous chunk at this location in the file if it exists.
2989          * If it does not exist then put a zero into the tree. This creates
2990          * the tnode now, rather than later when it is harder to clean up.
2991          */
2992         prev_chunk_id = yaffs_find_chunk_in_file(in, inode_chunk, &prev_tags);
2993         if (prev_chunk_id < 1 &&
2994             !yaffs_put_chunk_in_file(in, inode_chunk, 0, 0))
2995                 return 0;
2996
2997         /* Set up new tags */
2998         memset(&new_tags, 0, sizeof(new_tags));
2999
3000         new_tags.chunk_id = inode_chunk;
3001         new_tags.obj_id = in->obj_id;
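             /*
              * The serial number distinguishes the newer of two copies of the
              * same chunk if power is lost after the new copy is written but
              * before the old copy is deleted.
              */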
3002         new_tags.serial_number =
3003             (prev_chunk_id > 0) ? prev_tags.serial_number + 1 : 1;
3004         new_tags.n_bytes = n_bytes;
3005
3006         if (n_bytes < 1 || n_bytes > dev->param.total_bytes_per_chunk) {
3007                 yaffs_trace(YAFFS_TRACE_ERROR,
3008                   "Writing %d bytes to chunk!!!!!!!!!",
3009                    n_bytes);
3010                 BUG();
3011         }
3012
3013         new_chunk_id =
3014             yaffs_write_new_chunk(dev, buffer, &new_tags, use_reserve);
3015
3016         if (new_chunk_id > 0) {
3017                 yaffs_put_chunk_in_file(in, inode_chunk, new_chunk_id, 0);
3018
3019                 if (prev_chunk_id > 0)
3020                         yaffs_chunk_del(dev, prev_chunk_id, 1, __LINE__);
3021
3022                 yaffs_verify_file_sane(in);
3023         }
3024         return new_chunk_id;
3025
3026 }
3027
3028
3029
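     /*
      * Extended attributes are kept in the object header chunk, in the space
      * that follows struct yaffs_obj_hdr. That region is managed as a
      * name/value store by the nval_* routines, and modifications are routed
      * through yaffs_update_oh() so the header and the xattribs are rewritten
      * together.
      */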
3030 static int yaffs_do_xattrib_mod(struct yaffs_obj *obj, int set,
3031                                 const YCHAR *name, const void *value, int size,
3032                                 int flags)
3033 {
3034         struct yaffs_xattr_mod xmod;
3035         int result;
3036
3037         xmod.set = set;
3038         xmod.name = name;
3039         xmod.data = value;
3040         xmod.size = size;
3041         xmod.flags = flags;
3042         xmod.result = -ENOSPC;
3043
3044         result = yaffs_update_oh(obj, NULL, 0, 0, 0, &xmod);
3045
3046         if (result > 0)
3047                 return xmod.result;
3048         else
3049                 return -ENOSPC;
3050 }
3051
3052 static int yaffs_apply_xattrib_mod(struct yaffs_obj *obj, char *buffer,
3053                                    struct yaffs_xattr_mod *xmod)
3054 {
3055         int retval = 0;
3056         int x_offs = sizeof(struct yaffs_obj_hdr);
3057         struct yaffs_dev *dev = obj->my_dev;
3058         int x_size = dev->data_bytes_per_chunk - sizeof(struct yaffs_obj_hdr);
3059         char *x_buffer = buffer + x_offs;
3060
3061         if (xmod->set)
3062                 retval =
3063                     nval_set(x_buffer, x_size, xmod->name, xmod->data,
3064                              xmod->size, xmod->flags);
3065         else
3066                 retval = nval_del(x_buffer, x_size, xmod->name);
3067
3068         obj->has_xattr = nval_hasvalues(x_buffer, x_size);
3069         obj->xattr_known = 1;
3070         xmod->result = retval;
3071
3072         return retval;
3073 }
3074
3075 static int yaffs_do_xattrib_fetch(struct yaffs_obj *obj, const YCHAR *name,
3076                                   void *value, int size)
3077 {
3078         char *buffer = NULL;
3079         int result;
3080         struct yaffs_ext_tags tags;
3081         struct yaffs_dev *dev = obj->my_dev;
3082         int x_offs = sizeof(struct yaffs_obj_hdr);
3083         int x_size = dev->data_bytes_per_chunk - sizeof(struct yaffs_obj_hdr);
3084         char *x_buffer;
3085         int retval = 0;
3086
3087         if (obj->hdr_chunk < 1)
3088                 return -ENODATA;
3089
3090         /* If we know that the object has no xattribs then don't do all the
3091          * reading and parsing.
3092          */
3093         if (obj->xattr_known && !obj->has_xattr) {
3094                 if (name)
3095                         return -ENODATA;
3096                 else
3097                         return 0;
3098         }
3099
3100         buffer = (char *)yaffs_get_temp_buffer(dev);
3101         if (!buffer)
3102                 return -ENOMEM;
3103
3104         result =
3105             yaffs_rd_chunk_tags_nand(dev, obj->hdr_chunk, (u8 *) buffer, &tags);
3106
3107         if (result != YAFFS_OK)
3108                 retval = -ENOENT;
3109         else {
3110                 x_buffer = buffer + x_offs;
3111
3112                 if (!obj->xattr_known) {
3113                         obj->has_xattr = nval_hasvalues(x_buffer, x_size);
3114                         obj->xattr_known = 1;
3115                 }
3116
3117                 if (name)
3118                         retval = nval_get(x_buffer, x_size, name, value, size);
3119                 else
3120                         retval = nval_list(x_buffer, x_size, value, size);
3121         }
3122         yaffs_release_temp_buffer(dev, (u8 *) buffer);
3123         return retval;
3124 }
3125
3126 int yaffs_set_xattrib(struct yaffs_obj *obj, const YCHAR *name,
3127                       const void *value, int size, int flags)
3128 {
3129         return yaffs_do_xattrib_mod(obj, 1, name, value, size, flags);
3130 }
3131
3132 int yaffs_remove_xattrib(struct yaffs_obj *obj, const YCHAR *name)
3133 {
3134         return yaffs_do_xattrib_mod(obj, 0, name, NULL, 0, 0);
3135 }
3136
3137 int yaffs_get_xattrib(struct yaffs_obj *obj, const YCHAR *name, void *value,
3138                       int size)
3139 {
3140         return yaffs_do_xattrib_fetch(obj, name, value, size);
3141 }
3142
3143 int yaffs_list_xattrib(struct yaffs_obj *obj, char *buffer, int size)
3144 {
3145         return yaffs_do_xattrib_fetch(obj, NULL, buffer, size);
3146 }
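     /*
      * Illustrative only, not part of YAFFS itself: a caller holding a valid
      * struct yaffs_obj pointer might use the calls above as sketched below.
      * Error handling is omitted and the attribute name is just an example.
      *
      *	char list[128];
      *	char val[16];
      *
      *	yaffs_set_xattrib(obj, _Y("user.note"), "hello", 5, 0);
      *	yaffs_get_xattrib(obj, _Y("user.note"), val, sizeof(val));
      *	yaffs_list_xattrib(obj, list, sizeof(list));
      *	yaffs_remove_xattrib(obj, _Y("user.note"));
      */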
3147
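     /*
      * Objects created from tags alone during scanning are flagged
      * lazy_loaded; their name, mode and attributes are only read from the
      * object header the first time they are needed, which is what this
      * helper does.
      */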
3148 static void yaffs_check_obj_details_loaded(struct yaffs_obj *in)
3149 {
3150         u8 *buf;
3151         struct yaffs_obj_hdr *oh;
3152         struct yaffs_dev *dev;
3153         struct yaffs_ext_tags tags;
3154         int result;
3155         int alloc_failed = 0;
3156
3157         if (!in || !in->lazy_loaded || in->hdr_chunk < 1)
3158                 return;
3159
3160         dev = in->my_dev;
3161         in->lazy_loaded = 0;
3162         buf = yaffs_get_temp_buffer(dev);
3163
3164         result = yaffs_rd_chunk_tags_nand(dev, in->hdr_chunk, buf, &tags);
3165         oh = (struct yaffs_obj_hdr *)buf;
3166
3167         in->yst_mode = oh->yst_mode;
3168         yaffs_load_attribs(in, oh);
3169         yaffs_set_obj_name_from_oh(in, oh);
3170
3171         if (in->variant_type == YAFFS_OBJECT_TYPE_SYMLINK) {
3172                 in->variant.symlink_variant.alias =
3173                     yaffs_clone_str(oh->alias);
3174                 if (!in->variant.symlink_variant.alias)
3175                         alloc_failed = 1;       /* Not returned */
3176         }
3177         yaffs_release_temp_buffer(dev, buf);
3178 }
3179
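     /*
      * With CONFIG_YAFFS_AUTO_UNICODE the name stored in the object header
      * records its own encoding: a non-zero first byte means a plain ASCII
      * name stored as chars, while a leading 0 means a unicode name follows,
      * starting at the second YCHAR. The two helpers below convert between
      * that stored form and the in-memory YCHAR name.
      */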
3180 static void yaffs_load_name_from_oh(struct yaffs_dev *dev, YCHAR *name,
3181                                     const YCHAR *oh_name, int buff_size)
3182 {
3183 #ifdef CONFIG_YAFFS_AUTO_UNICODE
3184         if (dev->param.auto_unicode) {
3185                 if (*oh_name) {
3186                         /* It is an ASCII name, do an ASCII to
3187                          * unicode conversion */
3188                         const char *ascii_oh_name = (const char *)oh_name;
3189                         int n = buff_size - 1;
3190                         while (n > 0 && *ascii_oh_name) {
3191                                 *name = *ascii_oh_name;
3192                                 name++;
3193                                 ascii_oh_name++;
3194                                 n--;
3195                         }
3196                 } else {
3197                         strncpy(name, oh_name + 1, buff_size - 1);
3198                 }
3199         } else {
3200 #else
3201         dev = dev;
3202         {
3203 #endif
3204                 strncpy(name, oh_name, buff_size - 1);
3205         }
3206 }
3207
3208 static void yaffs_load_oh_from_name(struct yaffs_dev *dev, YCHAR *oh_name,
3209                                     const YCHAR *name)
3210 {
3211 #ifdef CONFIG_YAFFS_AUTO_UNICODE
3212
3213         int is_ascii;
3214         YCHAR *w;
3215
3216         if (dev->param.auto_unicode) {
3217
3218                 is_ascii = 1;
3219                 w = name;
3220
3221                 /* Figure out if the name will fit in ascii character set */
3222                 while (is_ascii && *w) {
3223                         if ((*w) & 0xff00)
3224                                 is_ascii = 0;
3225                         w++;
3226                 }
3227
3228                 if (is_ascii) {
3229                         /* It is an ASCII name, so convert unicode to ascii */
3230                         char *ascii_oh_name = (char *)oh_name;
3231                         int n = YAFFS_MAX_NAME_LENGTH - 1;
3232                         while (n > 0 && *name) {
3233                                 *ascii_oh_name = *name;
3234                                 name++;
3235                                 ascii_oh_name++;
3236                                 n--;
3237                         }
3238                 } else {
3239                         /* Unicode name, so save starting at the second YCHAR */
3240                         *oh_name = 0;
3241                         strncpy(oh_name + 1, name, YAFFS_MAX_NAME_LENGTH - 2);
3242                 }
3243         } else {
3244 #else
3245         dev = dev;
3246         {
3247 #endif
3248                 strncpy(oh_name, name, YAFFS_MAX_NAME_LENGTH - 1);
3249         }
3250 }
3251
3252 /* yaffs_update_oh updates the object header on NAND for an object.
3253  * If name is not NULL, then that new name is used.
3254  */
3255 int yaffs_update_oh(struct yaffs_obj *in, const YCHAR *name, int force,
3256                     int is_shrink, int shadows, struct yaffs_xattr_mod *xmod)
3257 {
3258
3259         struct yaffs_block_info *bi;
3260         struct yaffs_dev *dev = in->my_dev;
3261         int prev_chunk_id;
3262         int ret_val = 0;
3263         int result = 0;
3264         int new_chunk_id;
3265         struct yaffs_ext_tags new_tags;
3266         struct yaffs_ext_tags old_tags;
3267         const YCHAR *alias = NULL;
3268         u8 *buffer = NULL;
3269         YCHAR old_name[YAFFS_MAX_NAME_LENGTH + 1];
3270         struct yaffs_obj_hdr *oh = NULL;
3271         loff_t file_size = 0;
3272
3273         strcpy(old_name, _Y("silly old name"));
3274
3275         if (in->fake && in != dev->root_dir && !force && !xmod)
3276                 return ret_val;
3277
3278         yaffs_check_gc(dev, 0);
3279         yaffs_check_obj_details_loaded(in);
3280
3281         buffer = yaffs_get_temp_buffer(in->my_dev);
3282         oh = (struct yaffs_obj_hdr *)buffer;
3283
3284         prev_chunk_id = in->hdr_chunk;
3285
3286         if (prev_chunk_id > 0) {
3287                 result = yaffs_rd_chunk_tags_nand(dev, prev_chunk_id,
3288                                                   buffer, &old_tags);
3289
3290                 yaffs_verify_oh(in, oh, &old_tags, 0);
3291                 memcpy(old_name, oh->name, sizeof(oh->name));
3292                 memset(buffer, 0xff, sizeof(struct yaffs_obj_hdr));
3293         } else {
3294                 memset(buffer, 0xff, dev->data_bytes_per_chunk);
3295         }
3296
3297         oh->type = in->variant_type;
3298         oh->yst_mode = in->yst_mode;
3299         oh->shadows_obj = oh->inband_shadowed_obj_id = shadows;
3300
3301         yaffs_load_attribs_oh(oh, in);
3302
3303         if (in->parent)
3304                 oh->parent_obj_id = in->parent->obj_id;
3305         else
3306                 oh->parent_obj_id = 0;
3307
3308         if (name && *name) {
3309                 memset(oh->name, 0, sizeof(oh->name));
3310                 yaffs_load_oh_from_name(dev, oh->name, name);
3311         } else if (prev_chunk_id > 0) {
3312                 memcpy(oh->name, old_name, sizeof(oh->name));
3313         } else {
3314                 memset(oh->name, 0, sizeof(oh->name));
3315         }
3316
3317         oh->is_shrink = is_shrink;
3318
3319         switch (in->variant_type) {
3320         case YAFFS_OBJECT_TYPE_UNKNOWN:
3321                 /* Should not happen */
3322                 break;
3323         case YAFFS_OBJECT_TYPE_FILE:
3324                 if (oh->parent_obj_id != YAFFS_OBJECTID_DELETED &&
3325                     oh->parent_obj_id != YAFFS_OBJECTID_UNLINKED)
3326                         file_size = in->variant.file_variant.file_size;
3327                 yaffs_oh_size_load(oh, file_size);
3328                 break;
3329         case YAFFS_OBJECT_TYPE_HARDLINK:
3330                 oh->equiv_id = in->variant.hardlink_variant.equiv_id;
3331                 break;
3332         case YAFFS_OBJECT_TYPE_SPECIAL:
3333                 /* Do nothing */
3334                 break;
3335         case YAFFS_OBJECT_TYPE_DIRECTORY:
3336                 /* Do nothing */
3337                 break;
3338         case YAFFS_OBJECT_TYPE_SYMLINK:
3339                 alias = in->variant.symlink_variant.alias;
3340                 if (!alias)
3341                         alias = _Y("no alias");
3342                 strncpy(oh->alias, alias, YAFFS_MAX_ALIAS_LENGTH);
3343                 oh->alias[YAFFS_MAX_ALIAS_LENGTH] = 0;
3344                 break;
3345         }
3346
3347         /* process any xattrib modifications */
3348         if (xmod)
3349                 yaffs_apply_xattrib_mod(in, (char *)buffer, xmod);
3350
3351         /* Tags */
3352         memset(&new_tags, 0, sizeof(new_tags));
3353         in->serial++;
3354         new_tags.chunk_id = 0;
3355         new_tags.obj_id = in->obj_id;
3356         new_tags.serial_number = in->serial;
3357
3358         /* Add extra info for file header */
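             /*
              * With yaffs2's packed tags this extra information travels with
              * the tags, letting a scan pick up the object's parent, type and
              * size without reading the whole header chunk.
              */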
3359         new_tags.extra_available = 1;
3360         new_tags.extra_parent_id = oh->parent_obj_id;
3361         new_tags.extra_file_size = file_size;
3362         new_tags.extra_is_shrink = oh->is_shrink;
3363         new_tags.extra_equiv_id = oh->equiv_id;
3364         new_tags.extra_shadows = (oh->shadows_obj > 0) ? 1 : 0;
3365         new_tags.extra_obj_type = in->variant_type;
3366         yaffs_verify_oh(in, oh, &new_tags, 1);
3367
3368         /* Create new chunk in NAND */
3369         new_chunk_id =
3370             yaffs_write_new_chunk(dev, buffer, &new_tags,
3371                                   (prev_chunk_id > 0) ? 1 : 0);
3372
3373         if (buffer)
3374                 yaffs_release_temp_buffer(dev, buffer);
3375
3376         if (new_chunk_id < 0)
3377                 return new_chunk_id;
3378
3379         in->hdr_chunk = new_chunk_id;
3380
3381         if (prev_chunk_id > 0)
3382                 yaffs_chunk_del(dev, prev_chunk_id, 1, __LINE__);
3383
3384         if (!yaffs_obj_cache_dirty(in))
3385                 in->dirty = 0;
3386
3387         /* If this was a shrink, then mark the block
3388          * that the chunk lives on */
3389         if (is_shrink) {
3390                 bi = yaffs_get_block_info(in->my_dev,
3391                                           new_chunk_id /
3392                                           in->my_dev->param.chunks_per_block);
3393                 bi->has_shrink_hdr = 1;
3394         }
3395
3396
3397         return new_chunk_id;
3398 }
3399
3400 /*--------------------- File read/write ------------------------
3401  * Read and write have very similar structures.
3402  * In general the read/write has three parts to it
3403  * An incomplete chunk to start with (if the read/write is not chunk-aligned)
3404  * Some complete chunks
3405  * An incomplete chunk to end off with
3406  *
3407  * Curve-balls: the first chunk might also be the last chunk.
3408  */
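     /*
      * For example, with 2048-byte data chunks a read of 5000 bytes starting
      * at file offset 1000 touches file chunks 1 to 3 (file chunks are
      * numbered from 1): the last 1048 bytes of chunk 1, all 2048 bytes of
      * chunk 2 and the first 1904 bytes of chunk 3. The 2048 is only an
      * example; dev->data_bytes_per_chunk depends on the flash geometry.
      */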
3409
3410 int yaffs_file_rd(struct yaffs_obj *in, u8 *buffer, loff_t offset, int n_bytes)
3411 {
3412         int chunk;
3413         u32 start;
3414         int n_copy;
3415         int n = n_bytes;
3416         int n_done = 0;
3417         struct yaffs_cache *cache;
3418         struct yaffs_dev *dev;
3419
3420         dev = in->my_dev;
3421