yaffs: Refactor drivers WIP - stress tests passing
[yaffs2.git] / yaffs_guts.c
1 /*
2  * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
3  *
4  * Copyright (C) 2002-2011 Aleph One Ltd.
5  *   for Toby Churchill Ltd and Brightstar Engineering
6  *
7  * Created by Charles Manning <charles@aleph1.co.uk>
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License version 2 as
11  * published by the Free Software Foundation.
12  */
13
14 #include "yportenv.h"
15 #include "yaffs_trace.h"
16
17 #include "yaffs_guts.h"
18 #include "yaffs_getblockinfo.h"
19 #include "yaffs_tagscompat.h"
20 #include "yaffs_tagsmarshall.h"
21 #include "yaffs_nand.h"
22 #include "yaffs_yaffs1.h"
23 #include "yaffs_yaffs2.h"
24 #include "yaffs_bitmap.h"
25 #include "yaffs_verify.h"
26 #include "yaffs_nand.h"
27 #include "yaffs_packedtags2.h"
28 #include "yaffs_nameval.h"
29 #include "yaffs_allocator.h"
30 #include "yaffs_attribs.h"
31 #include "yaffs_summary.h"
32
33 /* Note YAFFS_GC_GOOD_ENOUGH must be <= YAFFS_GC_PASSIVE_THRESHOLD */
34 #define YAFFS_GC_GOOD_ENOUGH 2
35 #define YAFFS_GC_PASSIVE_THRESHOLD 4
36
37 #include "yaffs_ecc.h"
38
39 /* Forward declarations */
40
41 static int yaffs_wr_data_obj(struct yaffs_obj *in, int inode_chunk,
42                              const u8 *buffer, int n_bytes, int use_reserve);
43
44
45
46 /* Function to calculate chunk and offset */
47
48 void yaffs_addr_to_chunk(struct yaffs_dev *dev, loff_t addr,
49                                 int *chunk_out, u32 *offset_out)
50 {
51         int chunk;
52         u32 offset;
53
54         chunk = (u32) (addr >> dev->chunk_shift);
55
56         if (dev->chunk_div == 1) {
57                 /* easy power of 2 case */
58                 offset = (u32) (addr & dev->chunk_mask);
59         } else {
60                 /* Non power-of-2 case */
61
62                 loff_t chunk_base;
63
64                 chunk /= dev->chunk_div;
65
66                 chunk_base = ((loff_t) chunk) * dev->data_bytes_per_chunk;
67                 offset = (u32) (addr - chunk_base);
68         }
69
70         *chunk_out = chunk;
71         *offset_out = offset;
72 }
73
74 /* Function to return the number of shifts for a power of 2 greater than or
75  * equal to the given number
76  * Note we don't try to cater for all possible numbers and this does not have to
77  * be hellishly efficient.
78  */
79
80 static inline u32 calc_shifts_ceiling(u32 x)
81 {
82         int extra_bits;
83         int shifts;
84
85         shifts = extra_bits = 0;
86
87         while (x > 1) {
88                 if (x & 1)
89                         extra_bits++;
90                 x >>= 1;
91                 shifts++;
92         }
93
94         if (extra_bits)
95                 shifts++;
96
97         return shifts;
98 }
99
100 /* Function to return the number of shifts to get a 1 in bit 0
101  */
102
103 static inline u32 calc_shifts(u32 x)
104 {
105         u32 shifts;
106
107         shifts = 0;
108
109         if (!x)
110                 return 0;
111
112         while (!(x & 1)) {
113                 x >>= 1;
114                 shifts++;
115         }
116
117         return shifts;
118 }
119
120 /*
121  * Temporary buffer manipulations.
122  */
123
124 static int yaffs_init_tmp_buffers(struct yaffs_dev *dev)
125 {
126         int i;
127         u8 *buf = (u8 *) 1;
128
129         memset(dev->temp_buffer, 0, sizeof(dev->temp_buffer));
130
131         for (i = 0; buf && i < YAFFS_N_TEMP_BUFFERS; i++) {
132                 dev->temp_buffer[i].in_use = 0;
133                 buf = kmalloc(dev->param.total_bytes_per_chunk, GFP_NOFS);
134                 dev->temp_buffer[i].buffer = buf;
135         }
136
137         return buf ? YAFFS_OK : YAFFS_FAIL;
138 }
139
140 u8 *yaffs_get_temp_buffer(struct yaffs_dev * dev)
141 {
142         int i;
143
144         dev->temp_in_use++;
145         if (dev->temp_in_use > dev->max_temp)
146                 dev->max_temp = dev->temp_in_use;
147
148         for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
149                 if (dev->temp_buffer[i].in_use == 0) {
150                         dev->temp_buffer[i].in_use = 1;
151                         return dev->temp_buffer[i].buffer;
152                 }
153         }
154
155         yaffs_trace(YAFFS_TRACE_BUFFERS, "Out of temp buffers");
156         /*
157          * If we got here then we have to allocate an unmanaged one
158          * This is not good.
159          */
160
161         dev->unmanaged_buffer_allocs++;
162         return kmalloc(dev->data_bytes_per_chunk, GFP_NOFS);
163
164 }
165
166 void yaffs_release_temp_buffer(struct yaffs_dev *dev, u8 *buffer)
167 {
168         int i;
169
170         dev->temp_in_use--;
171
172         for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
173                 if (dev->temp_buffer[i].buffer == buffer) {
174                         dev->temp_buffer[i].in_use = 0;
175                         return;
176                 }
177         }
178
179         if (buffer) {
180                 /* assume it is an unmanaged one. */
181                 yaffs_trace(YAFFS_TRACE_BUFFERS,
182                         "Releasing unmanaged temp buffer");
183                 kfree(buffer);
184                 dev->unmanaged_buffer_deallocs++;
185         }
186
187 }
188
189 /*
190  * Functions for robustisizing TODO
191  *
192  */
193
/*
 * Hook invoked after a chunk has been written and verified OK.
 * Currently a no-op placeholder for future robustness handling
 * (e.g. retaining a copy of the data for recovery).
 */
static void yaffs_handle_chunk_wr_ok(struct yaffs_dev *dev, int nand_chunk,
				     const u8 *data,
				     const struct yaffs_ext_tags *tags)
{
	/* Casts suppress unused-parameter warnings. */
	(void) dev;
	(void) nand_chunk;
	(void) data;
	(void) tags;
}
203
/*
 * Hook invoked when an existing chunk's tags are updated in place.
 * Currently a no-op placeholder for future robustness handling.
 */
static void yaffs_handle_chunk_update(struct yaffs_dev *dev, int nand_chunk,
				      const struct yaffs_ext_tags *tags)
{
	/* Casts suppress unused-parameter warnings. */
	(void) dev;
	(void) nand_chunk;
	(void) tags;
}
211
212 void yaffs_handle_chunk_error(struct yaffs_dev *dev,
213                               struct yaffs_block_info *bi)
214 {
215         if (!bi->gc_prioritise) {
216                 bi->gc_prioritise = 1;
217                 dev->has_pending_prioritised_gc = 1;
218                 bi->chunk_error_strikes++;
219
220                 if (bi->chunk_error_strikes > 3) {
221                         bi->needs_retiring = 1; /* Too many stikes, so retire */
222                         yaffs_trace(YAFFS_TRACE_ALWAYS,
223                                 "yaffs: Block struck out");
224
225                 }
226         }
227 }
228
229 static void yaffs_handle_chunk_wr_error(struct yaffs_dev *dev, int nand_chunk,
230                                         int erased_ok)
231 {
232         int flash_block = nand_chunk / dev->param.chunks_per_block;
233         struct yaffs_block_info *bi = yaffs_get_block_info(dev, flash_block);
234
235         yaffs_handle_chunk_error(dev, bi);
236
237         if (erased_ok) {
238                 /* Was an actual write failure,
239                  * so mark the block for retirement.*/
240                 bi->needs_retiring = 1;
241                 yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
242                   "**>> Block %d needs retiring", flash_block);
243         }
244
245         /* Delete the chunk */
246         yaffs_chunk_del(dev, nand_chunk, 1, __LINE__);
247         yaffs_skip_rest_of_block(dev);
248 }
249
250 /*
251  * Verification code
252  */
253
254 /*
255  *  Simple hash function. Needs to have a reasonable spread
256  */
257
258 static inline int yaffs_hash_fn(int n)
259 {
260         if (n < 0)
261                 n = -n;
262         return n % YAFFS_NOBJECT_BUCKETS;
263 }
264
265 /*
266  * Access functions to useful fake objects.
267  * Note that root might have a presence in NAND if permissions are set.
268  */
269
/* Return the fake root directory object for this device. */
struct yaffs_obj *yaffs_root(struct yaffs_dev *dev)
{
	return dev->root_dir;
}
274
/* Return the fake lost+found directory object for this device. */
struct yaffs_obj *yaffs_lost_n_found(struct yaffs_dev *dev)
{
	return dev->lost_n_found;
}
279
280 /*
281  *  Erased NAND checking functions
282  */
283
284 int yaffs_check_ff(u8 *buffer, int n_bytes)
285 {
286         /* Horrible, slow implementation */
287         while (n_bytes--) {
288                 if (*buffer != 0xff)
289                         return 0;
290                 buffer++;
291         }
292         return 1;
293 }
294
295 static int yaffs_check_chunk_erased(struct yaffs_dev *dev, int nand_chunk)
296 {
297         int retval = YAFFS_OK;
298         u8 *data = yaffs_get_temp_buffer(dev);
299         struct yaffs_ext_tags tags;
300         int result;
301
302         result = yaffs_rd_chunk_tags_nand(dev, nand_chunk, data, &tags);
303
304         if (tags.ecc_result > YAFFS_ECC_RESULT_NO_ERROR)
305                 retval = YAFFS_FAIL;
306
307         if (!yaffs_check_ff(data, dev->data_bytes_per_chunk) ||
308                 tags.chunk_used) {
309                 yaffs_trace(YAFFS_TRACE_NANDACCESS,
310                         "Chunk %d not erased", nand_chunk);
311                 retval = YAFFS_FAIL;
312         }
313
314         yaffs_release_temp_buffer(dev, data);
315
316         return retval;
317
318 }
319
320 static int yaffs_verify_chunk_written(struct yaffs_dev *dev,
321                                       int nand_chunk,
322                                       const u8 *data,
323                                       struct yaffs_ext_tags *tags)
324 {
325         int retval = YAFFS_OK;
326         struct yaffs_ext_tags temp_tags;
327         u8 *buffer = yaffs_get_temp_buffer(dev);
328         int result;
329
330         result = yaffs_rd_chunk_tags_nand(dev, nand_chunk, buffer, &temp_tags);
331         if (memcmp(buffer, data, dev->data_bytes_per_chunk) ||
332             temp_tags.obj_id != tags->obj_id ||
333             temp_tags.chunk_id != tags->chunk_id ||
334             temp_tags.n_bytes != tags->n_bytes)
335                 retval = YAFFS_FAIL;
336
337         yaffs_release_temp_buffer(dev, buffer);
338
339         return retval;
340 }
341
342
343 int yaffs_check_alloc_available(struct yaffs_dev *dev, int n_chunks)
344 {
345         int reserved_chunks;
346         int reserved_blocks = dev->param.n_reserved_blocks;
347         int checkpt_blocks;
348
349         checkpt_blocks = yaffs_calc_checkpt_blocks_required(dev);
350
351         reserved_chunks =
352             (reserved_blocks + checkpt_blocks) * dev->param.chunks_per_block;
353
354         return (dev->n_free_chunks > (reserved_chunks + n_chunks));
355 }
356
357 static int yaffs_find_alloc_block(struct yaffs_dev *dev)
358 {
359         int i;
360         struct yaffs_block_info *bi;
361
362         if (dev->n_erased_blocks < 1) {
363                 /* Hoosterman we've got a problem.
364                  * Can't get space to gc
365                  */
366                 yaffs_trace(YAFFS_TRACE_ERROR,
367                   "yaffs tragedy: no more erased blocks");
368
369                 return -1;
370         }
371
372         /* Find an empty block. */
373
374         for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
375                 dev->alloc_block_finder++;
376                 if (dev->alloc_block_finder < dev->internal_start_block
377                     || dev->alloc_block_finder > dev->internal_end_block) {
378                         dev->alloc_block_finder = dev->internal_start_block;
379                 }
380
381                 bi = yaffs_get_block_info(dev, dev->alloc_block_finder);
382
383                 if (bi->block_state == YAFFS_BLOCK_STATE_EMPTY) {
384                         bi->block_state = YAFFS_BLOCK_STATE_ALLOCATING;
385                         dev->seq_number++;
386                         bi->seq_number = dev->seq_number;
387                         dev->n_erased_blocks--;
388                         yaffs_trace(YAFFS_TRACE_ALLOCATE,
389                           "Allocated block %d, seq  %d, %d left" ,
390                            dev->alloc_block_finder, dev->seq_number,
391                            dev->n_erased_blocks);
392                         return dev->alloc_block_finder;
393                 }
394         }
395
396         yaffs_trace(YAFFS_TRACE_ALWAYS,
397                 "yaffs tragedy: no more erased blocks, but there should have been %d",
398                 dev->n_erased_blocks);
399
400         return -1;
401 }
402
403 static int yaffs_alloc_chunk(struct yaffs_dev *dev, int use_reserver,
404                              struct yaffs_block_info **block_ptr)
405 {
406         int ret_val;
407         struct yaffs_block_info *bi;
408
409         if (dev->alloc_block < 0) {
410                 /* Get next block to allocate off */
411                 dev->alloc_block = yaffs_find_alloc_block(dev);
412                 dev->alloc_page = 0;
413         }
414
415         if (!use_reserver && !yaffs_check_alloc_available(dev, 1)) {
416                 /* No space unless we're allowed to use the reserve. */
417                 return -1;
418         }
419
420         if (dev->n_erased_blocks < dev->param.n_reserved_blocks
421             && dev->alloc_page == 0)
422                 yaffs_trace(YAFFS_TRACE_ALLOCATE, "Allocating reserve");
423
424         /* Next page please.... */
425         if (dev->alloc_block >= 0) {
426                 bi = yaffs_get_block_info(dev, dev->alloc_block);
427
428                 ret_val = (dev->alloc_block * dev->param.chunks_per_block) +
429                     dev->alloc_page;
430                 bi->pages_in_use++;
431                 yaffs_set_chunk_bit(dev, dev->alloc_block, dev->alloc_page);
432
433                 dev->alloc_page++;
434
435                 dev->n_free_chunks--;
436
437                 /* If the block is full set the state to full */
438                 if (dev->alloc_page >= dev->param.chunks_per_block) {
439                         bi->block_state = YAFFS_BLOCK_STATE_FULL;
440                         dev->alloc_block = -1;
441                 }
442
443                 if (block_ptr)
444                         *block_ptr = bi;
445
446                 return ret_val;
447         }
448
449         yaffs_trace(YAFFS_TRACE_ERROR,
450                 "!!!!!!!!! Allocator out !!!!!!!!!!!!!!!!!");
451
452         return -1;
453 }
454
/*
 * Return the number of erased chunks available: all chunks in fully
 * erased blocks plus the unused tail of the current allocation block.
 */
static int yaffs_get_erased_chunks(struct yaffs_dev *dev)
{
	int n;

	n = dev->n_erased_blocks * dev->param.chunks_per_block;

	/* NOTE(review): -1 is the "no allocation block" sentinel, yet the
	 * test is "> 0" rather than ">= 0", so a legitimate allocation
	 * block numbered 0 would be ignored here — confirm block 0 can
	 * never be the allocation block. */
	if (dev->alloc_block > 0)
		n += (dev->param.chunks_per_block - dev->alloc_page);

	return n;

}
467
468 /*
469  * yaffs_skip_rest_of_block() skips over the rest of the allocation block
470  * if we don't want to write to it.
471  */
void yaffs_skip_rest_of_block(struct yaffs_dev *dev)
{
	struct yaffs_block_info *bi;

	/* NOTE(review): -1 is the "no allocation block" sentinel, yet the
	 * test is "> 0" rather than ">= 0"; a legitimate allocation block
	 * numbered 0 would be skipped — confirm block 0 can never be the
	 * allocation block. */
	if (dev->alloc_block > 0) {
		bi = yaffs_get_block_info(dev, dev->alloc_block);
		if (bi->block_state == YAFFS_BLOCK_STATE_ALLOCATING) {
			/* Close out the partially used block and force a
			 * fresh one on the next allocation. */
			bi->block_state = YAFFS_BLOCK_STATE_FULL;
			dev->alloc_block = -1;
		}
	}
}
484
/*
 * Write data+tags to a freshly allocated chunk, retrying on a new
 * chunk if the erase check or write verification fails.  Returns the
 * chunk number written, or -1 on failure (no space, or retries
 * exhausted per yaffs_wr_attempts).  Invalidates any checkpoint since
 * the flash contents are about to change.
 */
static int yaffs_write_new_chunk(struct yaffs_dev *dev,
				 const u8 *data,
				 struct yaffs_ext_tags *tags, int use_reserver)
{
	int attempts = 0;
	int write_ok = 0;
	int chunk;

	yaffs2_checkpt_invalidate(dev);

	do {
		struct yaffs_block_info *bi = 0;
		int erased_ok = 0;

		chunk = yaffs_alloc_chunk(dev, use_reserver, &bi);
		if (chunk < 0) {
			/* no space */
			break;
		}

		/* First check this chunk is erased, if it needs
		 * checking.  The checking policy (unless forced
		 * always on) is as follows:
		 *
		 * Check the first page we try to write in a block.
		 * If the check passes then we don't need to check any
		 * more.        If the check fails, we check again...
		 * If the block has been erased, we don't need to check.
		 *
		 * However, if the block has been prioritised for gc,
		 * then we think there might be something odd about
		 * this block and stop using it.
		 *
		 * Rationale: We should only ever see chunks that have
		 * not been erased if there was a partially written
		 * chunk due to power loss.  This checking policy should
		 * catch that case with very few checks and thus save a
		 * lot of checks that are most likely not needed.
		 *
		 * Mods to the above
		 * If an erase check fails or the write fails we skip the
		 * rest of the block.
		 */

		/* let's give it a try */
		attempts++;

		if (dev->param.always_check_erased)
			bi->skip_erased_check = 0;

		if (!bi->skip_erased_check) {
			erased_ok = yaffs_check_chunk_erased(dev, chunk);
			if (erased_ok != YAFFS_OK) {
				yaffs_trace(YAFFS_TRACE_ERROR,
				  "**>> yaffs chunk %d was not erased",
				  chunk);

				/* If not erased, delete this one,
				 * skip rest of block and
				 * try another chunk */
				yaffs_chunk_del(dev, chunk, 1, __LINE__);
				yaffs_skip_rest_of_block(dev);
				continue;
			}
		}

		write_ok = yaffs_wr_chunk_tags_nand(dev, chunk, data, tags);

		/* While the erased check is active, also verify the write
		 * by reading the chunk back. */
		if (!bi->skip_erased_check)
			write_ok =
			    yaffs_verify_chunk_written(dev, chunk, data, tags);

		if (write_ok != YAFFS_OK) {
			/* Clean up aborted write, skip to next block and
			 * try another chunk */
			yaffs_handle_chunk_wr_error(dev, chunk, erased_ok);
			continue;
		}

		/* This block has proven itself: skip further erase checks. */
		bi->skip_erased_check = 1;

		/* Copy the data into the robustification buffer */
		yaffs_handle_chunk_wr_ok(dev, chunk, data, tags);

		/* Retry until success; yaffs_wr_attempts <= 0 means retry
		 * without limit. */
	} while (write_ok != YAFFS_OK &&
		 (yaffs_wr_attempts <= 0 || attempts <= yaffs_wr_attempts));

	if (!write_ok)
		chunk = -1;

	if (attempts > 1) {
		yaffs_trace(YAFFS_TRACE_ERROR,
			"**>> yaffs write required %d attempts",
			attempts);
		dev->n_retried_writes += (attempts - 1);
	}

	return chunk;
}
584
585 /*
586  * Block retiring for handling a broken block.
587  */
588
589 static void yaffs_retire_block(struct yaffs_dev *dev, int flash_block)
590 {
591         struct yaffs_block_info *bi = yaffs_get_block_info(dev, flash_block);
592
593         yaffs2_checkpt_invalidate(dev);
594
595         yaffs2_clear_oldest_dirty_seq(dev, bi);
596
597         if (yaffs_mark_bad(dev, flash_block) != YAFFS_OK) {
598                 if (yaffs_erase_block(dev, flash_block) != YAFFS_OK) {
599                         yaffs_trace(YAFFS_TRACE_ALWAYS,
600                                 "yaffs: Failed to mark bad and erase block %d",
601                                 flash_block);
602                 } else {
603                         struct yaffs_ext_tags tags;
604                         int chunk_id =
605                             flash_block * dev->param.chunks_per_block;
606
607                         u8 *buffer = yaffs_get_temp_buffer(dev);
608
609                         memset(buffer, 0xff, dev->data_bytes_per_chunk);
610                         memset(&tags, 0, sizeof(tags));
611                         tags.seq_number = YAFFS_SEQUENCE_BAD_BLOCK;
612                         if (dev->param.write_chunk_tags_fn(dev, chunk_id -
613                                                            dev->chunk_offset,
614                                                            buffer,
615                                                            &tags) != YAFFS_OK)
616                                 yaffs_trace(YAFFS_TRACE_ALWAYS,
617                                         "yaffs: Failed to write bad block marker to block %d",
618                                         flash_block);
619
620                         yaffs_release_temp_buffer(dev, buffer);
621                 }
622         }
623
624         bi->block_state = YAFFS_BLOCK_STATE_DEAD;
625         bi->gc_prioritise = 0;
626         bi->needs_retiring = 0;
627
628         dev->n_retired_blocks++;
629 }
630
631 /*---------------- Name handling functions ------------*/
632
633 static u16 yaffs_calc_name_sum(const YCHAR *name)
634 {
635         u16 sum = 0;
636         u16 i = 1;
637
638         if (!name)
639                 return 0;
640
641         while ((*name) && i < (YAFFS_MAX_NAME_LENGTH / 2)) {
642
643                 /* 0x1f mask is case insensitive */
644                 sum += ((*name) & 0x1f) * i;
645                 i++;
646                 name++;
647         }
648         return sum;
649 }
650
651 void yaffs_set_obj_name(struct yaffs_obj *obj, const YCHAR * name)
652 {
653         memset(obj->short_name, 0, sizeof(obj->short_name));
654         if (name &&
655                 strnlen(name, YAFFS_SHORT_NAME_LENGTH + 1) <=
656                 YAFFS_SHORT_NAME_LENGTH)
657                 strcpy(obj->short_name, name);
658         else
659                 obj->short_name[0] = _Y('\0');
660         obj->sum = yaffs_calc_name_sum(name);
661 }
662
/*
 * Set an object's cached name from an on-NAND object header.  With
 * auto-unicode enabled the stored name must first be decoded via
 * yaffs_load_name_from_oh; otherwise the header name is used directly.
 */
void yaffs_set_obj_name_from_oh(struct yaffs_obj *obj,
				const struct yaffs_obj_hdr *oh)
{
#ifdef CONFIG_YAFFS_AUTO_UNICODE
	YCHAR tmp_name[YAFFS_MAX_NAME_LENGTH + 1];
	memset(tmp_name, 0, sizeof(tmp_name));
	yaffs_load_name_from_oh(obj->my_dev, tmp_name, oh->name,
				YAFFS_MAX_NAME_LENGTH + 1);
	yaffs_set_obj_name(obj, tmp_name);
#else
	yaffs_set_obj_name(obj, oh->name);
#endif
}
676
677 loff_t yaffs_max_file_size(struct yaffs_dev *dev)
678 {
679         if(sizeof(loff_t) < 8)
680                 return YAFFS_MAX_FILE_SIZE_32;
681         else
682                 return ((loff_t) YAFFS_MAX_CHUNK_ID) * dev->data_bytes_per_chunk;
683 }
684
685 /*-------------------- TNODES -------------------
686
687  * List of spare tnodes
688  * The list is hooked together using the first pointer
689  * in the tnode.
690  */
691
692 struct yaffs_tnode *yaffs_get_tnode(struct yaffs_dev *dev)
693 {
694         struct yaffs_tnode *tn = yaffs_alloc_raw_tnode(dev);
695
696         if (tn) {
697                 memset(tn, 0, dev->tnode_size);
698                 dev->n_tnodes++;
699         }
700
701         dev->checkpoint_blocks_required = 0;    /* force recalculation */
702
703         return tn;
704 }
705
706 /* FreeTnode frees up a tnode and puts it back on the free list */
/* Release a tnode back to the raw allocator and update accounting. */
static void yaffs_free_tnode(struct yaffs_dev *dev, struct yaffs_tnode *tn)
{
	yaffs_free_raw_tnode(dev, tn);
	dev->n_tnodes--;
	dev->checkpoint_blocks_required = 0;	/* force recalculation */
}
713
/* Tear down the tnode/object allocators and reset the counters. */
static void yaffs_deinit_tnodes_and_objs(struct yaffs_dev *dev)
{
	yaffs_deinit_raw_tnodes_and_objs(dev);
	dev->n_obj = 0;
	dev->n_tnodes = 0;
}
720
/*
 * Store a value into slot pos of a level-0 tnode.  Level-0 tnodes are
 * packed arrays of tnode_width-bit entries laid over 32-bit words, so
 * an entry can straddle a word boundary and need a second masked
 * write.  val is a chunk id; the low chunk_grp_bits are dropped since
 * entries address chunk groups.
 */
static void yaffs_load_tnode_0(struct yaffs_dev *dev, struct yaffs_tnode *tn,
			unsigned pos, unsigned val)
{
	u32 *map = (u32 *) tn;
	u32 bit_in_map;
	u32 bit_in_word;
	u32 word_in_map;
	u32 mask;

	pos &= YAFFS_TNODES_LEVEL0_MASK;
	val >>= dev->chunk_grp_bits;

	/* Locate the first bit of the entry. */
	bit_in_map = pos * dev->tnode_width;
	word_in_map = bit_in_map / 32;
	bit_in_word = bit_in_map & (32 - 1);

	mask = dev->tnode_mask << bit_in_word;

	/* Clear then set the part of the entry in the first word. */
	map[word_in_map] &= ~mask;
	map[word_in_map] |= (mask & (val << bit_in_word));

	/* Entry straddles a word boundary: write the high part too. */
	if (dev->tnode_width > (32 - bit_in_word)) {
		bit_in_word = (32 - bit_in_word);
		word_in_map++;
		mask =
		    dev->tnode_mask >> bit_in_word;
		map[word_in_map] &= ~mask;
		map[word_in_map] |= (mask & (val >> bit_in_word));
	}
}
751
/*
 * Read the entry at slot pos of a level-0 tnode and return it as a
 * chunk-group base (i.e. scaled back up by chunk_grp_bits).  Inverse
 * of yaffs_load_tnode_0; handles entries straddling a 32-bit word.
 */
u32 yaffs_get_group_base(struct yaffs_dev *dev, struct yaffs_tnode *tn,
			 unsigned pos)
{
	u32 *map = (u32 *) tn;
	u32 bit_in_map;
	u32 bit_in_word;
	u32 word_in_map;
	u32 val;

	pos &= YAFFS_TNODES_LEVEL0_MASK;

	/* Locate the first bit of the entry. */
	bit_in_map = pos * dev->tnode_width;
	word_in_map = bit_in_map / 32;
	bit_in_word = bit_in_map & (32 - 1);

	val = map[word_in_map] >> bit_in_word;

	/* Entry straddles a word boundary: pull in the high part. */
	if (dev->tnode_width > (32 - bit_in_word)) {
		bit_in_word = (32 - bit_in_word);
		word_in_map++;
		val |= (map[word_in_map] << bit_in_word);
	}

	/* Trim to entry width, then scale back to a chunk id. */
	val &= dev->tnode_mask;
	val <<= dev->chunk_grp_bits;

	return val;
}
780
781 /* ------------------- End of individual tnode manipulation -----------------*/
782
783 /* ---------Functions to manipulate the look-up tree (made up of tnodes) ------
784  * The look up tree is represented by the top tnode and the number of top_level
785  * in the tree. 0 means only the level 0 tnode is in the tree.
786  */
787
788 /* FindLevel0Tnode finds the level 0 tnode, if one exists. */
789 struct yaffs_tnode *yaffs_find_tnode_0(struct yaffs_dev *dev,
790                                        struct yaffs_file_var *file_struct,
791                                        u32 chunk_id)
792 {
793         struct yaffs_tnode *tn = file_struct->top;
794         u32 i;
795         int required_depth;
796         int level = file_struct->top_level;
797
798         (void) dev;
799
800         /* Check sane level and chunk Id */
801         if (level < 0 || level > YAFFS_TNODES_MAX_LEVEL)
802                 return NULL;
803
804         if (chunk_id > YAFFS_MAX_CHUNK_ID)
805                 return NULL;
806
807         /* First check we're tall enough (ie enough top_level) */
808
809         i = chunk_id >> YAFFS_TNODES_LEVEL0_BITS;
810         required_depth = 0;
811         while (i) {
812                 i >>= YAFFS_TNODES_INTERNAL_BITS;
813                 required_depth++;
814         }
815
816         if (required_depth > file_struct->top_level)
817                 return NULL;    /* Not tall enough, so we can't find it */
818
819         /* Traverse down to level 0 */
820         while (level > 0 && tn) {
821                 tn = tn->internal[(chunk_id >>
822                                    (YAFFS_TNODES_LEVEL0_BITS +
823                                     (level - 1) *
824                                     YAFFS_TNODES_INTERNAL_BITS)) &
825                                   YAFFS_TNODES_INTERNAL_MASK];
826                 level--;
827         }
828
829         return tn;
830 }
831
832 /* add_find_tnode_0 finds the level 0 tnode if it exists,
833  * otherwise first expands the tree.
834  * This happens in two steps:
835  *  1. If the tree isn't tall enough, then make it taller.
836  *  2. Scan down the tree towards the level 0 tnode adding tnodes if required.
837  *
838  * Used when modifying the tree.
839  *
840  *  If the tn argument is NULL, then a fresh tnode will be added otherwise the
841  *  specified tn will be plugged into the ttree.
842  */
843
/*
 * Find (or create) the level-0 tnode covering chunk_id, growing the
 * tree taller and filling in missing internal tnodes as needed.  When
 * passed_tn is non-NULL it is plugged in as (or copied into) the
 * level-0 tnode instead of allocating a fresh one; any tnode it
 * replaces is freed.  Returns the level-0 tnode, or NULL on a bad
 * chunk id / tree state or tnode exhaustion.
 */
struct yaffs_tnode *yaffs_add_find_tnode_0(struct yaffs_dev *dev,
					   struct yaffs_file_var *file_struct,
					   u32 chunk_id,
					   struct yaffs_tnode *passed_tn)
{
	int required_depth;
	int i;
	int l;
	struct yaffs_tnode *tn;
	u32 x;

	/* Check sane level and page Id */
	if (file_struct->top_level < 0 ||
	    file_struct->top_level > YAFFS_TNODES_MAX_LEVEL)
		return NULL;

	if (chunk_id > YAFFS_MAX_CHUNK_ID)
		return NULL;

	/* First check we're tall enough (ie enough top_level) */

	x = chunk_id >> YAFFS_TNODES_LEVEL0_BITS;
	required_depth = 0;
	while (x) {
		x >>= YAFFS_TNODES_INTERNAL_BITS;
		required_depth++;
	}

	if (required_depth > file_struct->top_level) {
		/* Not tall enough, gotta make the tree taller by pushing
		 * new tnodes in above the current top. */
		for (i = file_struct->top_level; i < required_depth; i++) {

			tn = yaffs_get_tnode(dev);

			if (tn) {
				/* Old top becomes child 0 of the new top. */
				tn->internal[0] = file_struct->top;
				file_struct->top = tn;
				file_struct->top_level++;
			} else {
				yaffs_trace(YAFFS_TRACE_ERROR,
					"yaffs: no more tnodes");
				return NULL;
			}
		}
	}

	/* Traverse down to level 0, adding anything we need */

	l = file_struct->top_level;
	tn = file_struct->top;

	if (l > 0) {
		while (l > 0 && tn) {
			/* Child slot for chunk_id at this level. */
			x = (chunk_id >>
			     (YAFFS_TNODES_LEVEL0_BITS +
			      (l - 1) * YAFFS_TNODES_INTERNAL_BITS)) &
			    YAFFS_TNODES_INTERNAL_MASK;

			if ((l > 1) && !tn->internal[x]) {
				/* Add missing non-level-zero tnode */
				tn->internal[x] = yaffs_get_tnode(dev);
				if (!tn->internal[x])
					return NULL;
			} else if (l == 1) {
				/* Looking from level 1 at level 0 */
				if (passed_tn) {
					/* If we already have one, release it */
					if (tn->internal[x])
						yaffs_free_tnode(dev,
							tn->internal[x]);
					tn->internal[x] = passed_tn;

				} else if (!tn->internal[x]) {
					/* Don't have one, none passed in */
					tn->internal[x] = yaffs_get_tnode(dev);
					if (!tn->internal[x])
						return NULL;
				}
			}

			tn = tn->internal[x];
			l--;
		}
	} else {
		/* top is level 0: copy the passed tnode's level-0 payload
		 * over it and free the donor. */
		if (passed_tn) {
			memcpy(tn, passed_tn,
			       (dev->tnode_width * YAFFS_NTNODES_LEVEL0) / 8);
			yaffs_free_tnode(dev, passed_tn);
		}
	}

	return tn;
}
938
939 static int yaffs_tags_match(const struct yaffs_ext_tags *tags, int obj_id,
940                             int chunk_obj)
941 {
942         return (tags->chunk_id == chunk_obj &&
943                 tags->obj_id == obj_id &&
944                 !tags->is_deleted) ? 1 : 0;
945
946 }
947
948 static int yaffs_find_chunk_in_group(struct yaffs_dev *dev, int the_chunk,
949                                         struct yaffs_ext_tags *tags, int obj_id,
950                                         int inode_chunk)
951 {
952         int j;
953
954         for (j = 0; the_chunk && j < dev->chunk_grp_size; j++) {
955                 if (yaffs_check_chunk_bit
956                     (dev, the_chunk / dev->param.chunks_per_block,
957                      the_chunk % dev->param.chunks_per_block)) {
958
959                         if (dev->chunk_grp_size == 1)
960                                 return the_chunk;
961                         else {
962                                 yaffs_rd_chunk_tags_nand(dev, the_chunk, NULL,
963                                                          tags);
964                                 if (yaffs_tags_match(tags,
965                                                         obj_id, inode_chunk)) {
966                                         /* found it; */
967                                         return the_chunk;
968                                 }
969                         }
970                 }
971                 the_chunk++;
972         }
973         return -1;
974 }
975
976 static int yaffs_find_chunk_in_file(struct yaffs_obj *in, int inode_chunk,
977                                     struct yaffs_ext_tags *tags)
978 {
979         /*Get the Tnode, then get the level 0 offset chunk offset */
980         struct yaffs_tnode *tn;
981         int the_chunk = -1;
982         struct yaffs_ext_tags local_tags;
983         int ret_val = -1;
984         struct yaffs_dev *dev = in->my_dev;
985
986         if (!tags) {
987                 /* Passed a NULL, so use our own tags space */
988                 tags = &local_tags;
989         }
990
991         tn = yaffs_find_tnode_0(dev, &in->variant.file_variant, inode_chunk);
992
993         if (!tn)
994                 return ret_val;
995
996         the_chunk = yaffs_get_group_base(dev, tn, inode_chunk);
997
998         ret_val = yaffs_find_chunk_in_group(dev, the_chunk, tags, in->obj_id,
999                                               inode_chunk);
1000         return ret_val;
1001 }
1002
1003 static int yaffs_find_del_file_chunk(struct yaffs_obj *in, int inode_chunk,
1004                                      struct yaffs_ext_tags *tags)
1005 {
1006         /* Get the Tnode, then get the level 0 offset chunk offset */
1007         struct yaffs_tnode *tn;
1008         int the_chunk = -1;
1009         struct yaffs_ext_tags local_tags;
1010         struct yaffs_dev *dev = in->my_dev;
1011         int ret_val = -1;
1012
1013         if (!tags) {
1014                 /* Passed a NULL, so use our own tags space */
1015                 tags = &local_tags;
1016         }
1017
1018         tn = yaffs_find_tnode_0(dev, &in->variant.file_variant, inode_chunk);
1019
1020         if (!tn)
1021                 return ret_val;
1022
1023         the_chunk = yaffs_get_group_base(dev, tn, inode_chunk);
1024
1025         ret_val = yaffs_find_chunk_in_group(dev, the_chunk, tags, in->obj_id,
1026                                               inode_chunk);
1027
1028         /* Delete the entry in the filestructure (if found) */
1029         if (ret_val != -1)
1030                 yaffs_load_tnode_0(dev, tn, inode_chunk, 0);
1031
1032         return ret_val;
1033 }
1034
/* Record that NAND chunk nand_chunk holds position inode_chunk of file in,
 * growing the tnode tree as needed.
 * Returns YAFFS_OK or YAFFS_FAIL (tnode allocation failure).
 */
int yaffs_put_chunk_in_file(struct yaffs_obj *in, int inode_chunk,
			    int nand_chunk, int in_scan)
{
	/* NB in_scan is zero unless scanning.
	 * For forward scanning, in_scan is > 0;
	 * for backward scanning in_scan is < 0
	 *
	 * nand_chunk = 0 is a dummy insert to make sure the tnodes are there.
	 */

	struct yaffs_tnode *tn;
	struct yaffs_dev *dev = in->my_dev;
	int existing_cunk;	/* sic: "existing chunk" */
	struct yaffs_ext_tags existing_tags;
	struct yaffs_ext_tags new_tags;
	/* NOTE(review): these are only assigned/used on the forward-scanning
	 * path (in_scan > 0); short-circuit evaluation below keeps the
	 * backward path from reading them uninitialized. */
	unsigned existing_serial, new_serial;

	if (in->variant_type != YAFFS_OBJECT_TYPE_FILE) {
		/* Just ignore an attempt at putting a chunk into a non-file
		 * during scanning.
		 * If it is not during Scanning then something went wrong!
		 */
		if (!in_scan) {
			yaffs_trace(YAFFS_TRACE_ERROR,
				"yaffs tragedy:attempt to put data chunk into a non-file"
				);
			BUG();
		}

		yaffs_chunk_del(dev, nand_chunk, 1, __LINE__);
		return YAFFS_OK;
	}

	/* Make sure the tnode path down to this position exists. */
	tn = yaffs_add_find_tnode_0(dev,
				    &in->variant.file_variant,
				    inode_chunk, NULL);
	if (!tn)
		return YAFFS_FAIL;

	if (!nand_chunk)
		/* Dummy insert, bail now */
		return YAFFS_OK;

	existing_cunk = yaffs_get_group_base(dev, tn, inode_chunk);

	if (in_scan != 0) {
		/* If we're scanning then we need to test for duplicates
		 * NB This does not need to be efficient since it should only
		 * happen when the power fails during a write, then only one
		 * chunk should ever be affected.
		 *
		 * Correction for YAFFS2: This could happen quite a lot and we
		 * need to think about efficiency! TODO
		 * Update: For backward scanning we don't need to re-read tags
		 * so this is quite cheap.
		 */

		if (existing_cunk > 0) {
			/* NB Right now existing chunk will not be real
			 * chunk_id if the chunk group size > 1
			 * thus we have to do a FindChunkInFile to get the
			 * real chunk id.
			 *
			 * We have a duplicate now we need to decide which
			 * one to use:
			 *
			 * Backwards scanning YAFFS2: The old one is what
			 * we use, dump the new one.
			 * YAFFS1: Get both sets of tags and compare serial
			 * numbers.
			 */

			if (in_scan > 0) {
				/* Only do this for forward scanning */
				yaffs_rd_chunk_tags_nand(dev,
							 nand_chunk,
							 NULL, &new_tags);

				/* Do a proper find */
				existing_cunk =
				    yaffs_find_chunk_in_file(in, inode_chunk,
							     &existing_tags);
			}

			if (existing_cunk <= 0) {
				/*Hoosterman - how did this happen? */

				yaffs_trace(YAFFS_TRACE_ERROR,
					"yaffs tragedy: existing chunk < 0 in scan"
					);

			}

			/* NB The deleted flags should be false, otherwise
			 * the chunks will not be loaded during a scan
			 */

			if (in_scan > 0) {
				new_serial = new_tags.serial_number;
				existing_serial = existing_tags.serial_number;
			}

			/* Serial numbers are 2-bit (mod 4); the newer chunk
			 * is the one whose serial is old+1 mod 4. */
			if ((in_scan > 0) &&
			    (existing_cunk <= 0 ||
			     ((existing_serial + 1) & 3) == new_serial)) {
				/* Forward scanning.
				 * Use new
				 * Delete the old one and drop through to
				 * update the tnode
				 */
				yaffs_chunk_del(dev, existing_cunk, 1,
						__LINE__);
			} else {
				/* Backward scanning or we want to use the
				 * existing one
				 * Delete the new one and return early so that
				 * the tnode isn't changed
				 */
				yaffs_chunk_del(dev, nand_chunk, 1, __LINE__);
				return YAFFS_OK;
			}
		}

	}

	/* Position was previously empty: this chunk is new file data. */
	if (existing_cunk == 0)
		in->n_data_chunks++;

	yaffs_load_tnode_0(dev, tn, inode_chunk, nand_chunk);

	return YAFFS_OK;
}
1167
1168 static void yaffs_soft_del_chunk(struct yaffs_dev *dev, int chunk)
1169 {
1170         struct yaffs_block_info *the_block;
1171         unsigned block_no;
1172
1173         yaffs_trace(YAFFS_TRACE_DELETION, "soft delete chunk %d", chunk);
1174
1175         block_no = chunk / dev->param.chunks_per_block;
1176         the_block = yaffs_get_block_info(dev, block_no);
1177         if (the_block) {
1178                 the_block->soft_del_pages++;
1179                 dev->n_free_chunks++;
1180                 yaffs2_update_oldest_dirty_seq(dev, block_no, the_block);
1181         }
1182 }
1183
1184 /* SoftDeleteWorker scans backwards through the tnode tree and soft deletes all
1185  * the chunks in the file.
1186  * All soft deleting does is increment the block's softdelete count and pulls
1187  * the chunk out of the tnode.
1188  * Thus, essentially this is the same as DeleteWorker except that the chunks
1189  * are soft deleted.
1190  */
1191
static int yaffs_soft_del_worker(struct yaffs_obj *in, struct yaffs_tnode *tn,
				 u32 level, int chunk_offset)
{
	int i;
	int the_chunk;
	int all_done = 1;	/* stays 1 only if every subtree completed */
	struct yaffs_dev *dev = in->my_dev;

	if (!tn)
		return 1;

	if (level > 0) {
		/* Internal level: recurse into children from the highest
		 * index down, freeing each child tnode once its subtree has
		 * been fully soft deleted.  Stop early if a subtree fails.
		 */
		for (i = YAFFS_NTNODES_INTERNAL - 1;
			all_done && i >= 0;
			i--) {
			if (tn->internal[i]) {
				all_done =
				    yaffs_soft_del_worker(in,
					tn->internal[i],
					level - 1,
					(chunk_offset <<
					YAFFS_TNODES_INTERNAL_BITS)
					+ i);
				if (all_done) {
					yaffs_free_tnode(dev,
						tn->internal[i]);
					tn->internal[i] = NULL;
				} else {
					/* Can this happen? */
				}
			}
		}
		return (all_done) ? 1 : 0;
	}

	/* level 0: soft delete every referenced chunk and zero its slot. */
	 for (i = YAFFS_NTNODES_LEVEL0 - 1; i >= 0; i--) {
		the_chunk = yaffs_get_group_base(dev, tn, i);
		if (the_chunk) {
			yaffs_soft_del_chunk(dev, the_chunk);
			yaffs_load_tnode_0(dev, tn, i, 0);
		}
	}
	return 1;
}
1237
1238 static void yaffs_remove_obj_from_dir(struct yaffs_obj *obj)
1239 {
1240         struct yaffs_dev *dev = obj->my_dev;
1241         struct yaffs_obj *parent;
1242
1243         yaffs_verify_obj_in_dir(obj);
1244         parent = obj->parent;
1245
1246         yaffs_verify_dir(parent);
1247
1248         if (dev && dev->param.remove_obj_fn)
1249                 dev->param.remove_obj_fn(obj);
1250
1251         list_del_init(&obj->siblings);
1252         obj->parent = NULL;
1253
1254         yaffs_verify_dir(parent);
1255 }
1256
1257 void yaffs_add_obj_to_dir(struct yaffs_obj *directory, struct yaffs_obj *obj)
1258 {
1259         if (!directory) {
1260                 yaffs_trace(YAFFS_TRACE_ALWAYS,
1261                         "tragedy: Trying to add an object to a null pointer directory"
1262                         );
1263                 BUG();
1264                 return;
1265         }
1266         if (directory->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
1267                 yaffs_trace(YAFFS_TRACE_ALWAYS,
1268                         "tragedy: Trying to add an object to a non-directory"
1269                         );
1270                 BUG();
1271         }
1272
1273         if (obj->siblings.prev == NULL) {
1274                 /* Not initialised */
1275                 BUG();
1276         }
1277
1278         yaffs_verify_dir(directory);
1279
1280         yaffs_remove_obj_from_dir(obj);
1281
1282         /* Now add it */
1283         list_add(&obj->siblings, &directory->variant.dir_variant.children);
1284         obj->parent = directory;
1285
1286         if (directory == obj->my_dev->unlinked_dir
1287             || directory == obj->my_dev->del_dir) {
1288                 obj->unlinked = 1;
1289                 obj->my_dev->n_unlinked_files++;
1290                 obj->rename_allowed = 0;
1291         }
1292
1293         yaffs_verify_dir(directory);
1294         yaffs_verify_obj_in_dir(obj);
1295 }
1296
/* Rename and/or move obj to new_dir under new_name and rewrite the object
 * header.  Returns YAFFS_OK or YAFFS_FAIL.
 * force allows overwriting an existing target; shadows > 0 marks a
 * shadowing rename (see object-header update semantics).
 */
static int yaffs_change_obj_name(struct yaffs_obj *obj,
				 struct yaffs_obj *new_dir,
				 const YCHAR *new_name, int force, int shadows)
{
	int unlink_op;
	int del_op;
	struct yaffs_obj *existing_target;

	if (new_dir == NULL)
		new_dir = obj->parent;	/* use the old directory */

	if (new_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
		yaffs_trace(YAFFS_TRACE_ALWAYS,
			"tragedy: yaffs_change_obj_name: new_dir is not a directory"
			);
		BUG();
	}

	/* Moves into the special unlinked/deleted directories are allowed
	 * regardless of name clashes. */
	unlink_op = (new_dir == obj->my_dev->unlinked_dir);
	del_op = (new_dir == obj->my_dev->del_dir);

	existing_target = yaffs_find_by_name(new_dir, new_name);

	/* If the object is a file going into the unlinked directory,
	 *   then it is OK to just stuff it in since duplicate names are OK.
	 *   else only proceed if the new name does not exist and we're putting
	 *   it into a directory.
	 */
	if (!(unlink_op || del_op || force ||
	      shadows > 0 || !existing_target) ||
	      new_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY)
		return YAFFS_FAIL;

	yaffs_set_obj_name(obj, new_name);
	obj->dirty = 1;
	yaffs_add_obj_to_dir(new_dir, obj);

	if (unlink_op)
		obj->unlinked = 1;

	/* If it is a deletion then we mark it as a shrink for gc  */
	if (yaffs_update_oh(obj, new_name, 0, del_op, shadows, NULL) >= 0)
		return YAFFS_OK;

	return YAFFS_FAIL;
}
1343
1344 /*------------------------ Short Operations Cache ------------------------------
1345  *   In many situations where there is no high level buffering  a lot of
1346  *   reads might be short sequential reads, and a lot of writes may be short
1347  *   sequential writes. eg. scanning/writing a jpeg file.
 *   In these cases, a short read/write cache can provide a huge performance
1349  *   benefit with dumb-as-a-rock code.
1350  *   In Linux, the page cache provides read buffering and the short op cache
1351  *   provides write buffering.
1352  *
1353  *   There are a small number (~10) of cache chunks per device so that we don't
1354  *   need a very intelligent search.
1355  */
1356
1357 static int yaffs_obj_cache_dirty(struct yaffs_obj *obj)
1358 {
1359         struct yaffs_dev *dev = obj->my_dev;
1360         int i;
1361         struct yaffs_cache *cache;
1362         int n_caches = obj->my_dev->param.n_caches;
1363
1364         for (i = 0; i < n_caches; i++) {
1365                 cache = &dev->cache[i];
1366                 if (cache->object == obj && cache->dirty)
1367                         return 1;
1368         }
1369
1370         return 0;
1371 }
1372
/* Write out all dirty cache pages belonging to obj, lowest chunk first,
 * freeing each cache entry as it is flushed.
 * Locked entries are skipped; if the device fills up mid-flush an error
 * is traced and the remaining dirty pages are left in the cache.
 */
static void yaffs_flush_file_cache(struct yaffs_obj *obj)
{
	struct yaffs_dev *dev = obj->my_dev;
	int lowest = -99;	/* Stop compiler whining. */
	int i;
	struct yaffs_cache *cache;
	int chunk_written = 0;
	int n_caches = obj->my_dev->param.n_caches;

	if (n_caches < 1)
		return;
	do {
		cache = NULL;

		/* Find the lowest dirty chunk for this object */
		for (i = 0; i < n_caches; i++) {
			if (dev->cache[i].object == obj &&
			    dev->cache[i].dirty) {
				if (!cache ||
				    dev->cache[i].chunk_id < lowest) {
					cache = &dev->cache[i];
					lowest = cache->chunk_id;
				}
			}
		}

		if (cache && !cache->locked) {
			/* Write it out and free it up */
			chunk_written =
			    yaffs_wr_data_obj(cache->object,
					      cache->chunk_id,
					      cache->data,
					      cache->n_bytes, 1);
			cache->dirty = 0;
			cache->object = NULL;
		}
		/* Loop ends when nothing dirty remains (cache == NULL) or
		 * the last write failed / entry was locked
		 * (chunk_written <= 0). */
	} while (cache && chunk_written > 0);

	if (cache)
		/* Hoosterman, disk full while writing cache out. */
		yaffs_trace(YAFFS_TRACE_ERROR,
			"yaffs tragedy: no space during cache write");
}
1416
1417 /*yaffs_flush_whole_cache(dev)
1418  *
1419  *
1420  */
1421
1422 void yaffs_flush_whole_cache(struct yaffs_dev *dev)
1423 {
1424         struct yaffs_obj *obj;
1425         int n_caches = dev->param.n_caches;
1426         int i;
1427
1428         /* Find a dirty object in the cache and flush it...
1429          * until there are no further dirty objects.
1430          */
1431         do {
1432                 obj = NULL;
1433                 for (i = 0; i < n_caches && !obj; i++) {
1434                         if (dev->cache[i].object && dev->cache[i].dirty)
1435                                 obj = dev->cache[i].object;
1436                 }
1437                 if (obj)
1438                         yaffs_flush_file_cache(obj);
1439         } while (obj);
1440
1441 }
1442
1443 /* Grab us a cache chunk for use.
1444  * First look for an empty one.
1445  * Then look for the least recently used non-dirty one.
1446  * Then look for the least recently used dirty one...., flush and look again.
1447  */
1448 static struct yaffs_cache *yaffs_grab_chunk_worker(struct yaffs_dev *dev)
1449 {
1450         int i;
1451
1452         if (dev->param.n_caches > 0) {
1453                 for (i = 0; i < dev->param.n_caches; i++) {
1454                         if (!dev->cache[i].object)
1455                                 return &dev->cache[i];
1456                 }
1457         }
1458         return NULL;
1459 }
1460
1461 static struct yaffs_cache *yaffs_grab_chunk_cache(struct yaffs_dev *dev)
1462 {
1463         struct yaffs_cache *cache;
1464         struct yaffs_obj *the_obj;
1465         int usage;
1466         int i;
1467         int pushout;
1468
1469         if (dev->param.n_caches < 1)
1470                 return NULL;
1471
1472         /* Try find a non-dirty one... */
1473
1474         cache = yaffs_grab_chunk_worker(dev);
1475
1476         if (!cache) {
1477                 /* They were all dirty, find the LRU object and flush
1478                  * its cache, then  find again.
1479                  * NB what's here is not very accurate,
1480                  * we actually flush the object with the LRU chunk.
1481                  */
1482
1483                 /* With locking we can't assume we can use entry zero,
1484                  * Set the_obj to a valid pointer for Coverity. */
1485                 the_obj = dev->cache[0].object;
1486                 usage = -1;
1487                 cache = NULL;
1488                 pushout = -1;
1489
1490                 for (i = 0; i < dev->param.n_caches; i++) {
1491                         if (dev->cache[i].object &&
1492                             !dev->cache[i].locked &&
1493                             (dev->cache[i].last_use < usage ||
1494                             !cache)) {
1495                                 usage = dev->cache[i].last_use;
1496                                 the_obj = dev->cache[i].object;
1497                                 cache = &dev->cache[i];
1498                                 pushout = i;
1499                         }
1500                 }
1501
1502                 if (!cache || cache->dirty) {
1503                         /* Flush and try again */
1504                         yaffs_flush_file_cache(the_obj);
1505                         cache = yaffs_grab_chunk_worker(dev);
1506                 }
1507         }
1508         return cache;
1509 }
1510
1511 /* Find a cached chunk */
1512 static struct yaffs_cache *yaffs_find_chunk_cache(const struct yaffs_obj *obj,
1513                                                   int chunk_id)
1514 {
1515         struct yaffs_dev *dev = obj->my_dev;
1516         int i;
1517
1518         if (dev->param.n_caches < 1)
1519                 return NULL;
1520
1521         for (i = 0; i < dev->param.n_caches; i++) {
1522                 if (dev->cache[i].object == obj &&
1523                     dev->cache[i].chunk_id == chunk_id) {
1524                         dev->cache_hits++;
1525
1526                         return &dev->cache[i];
1527                 }
1528         }
1529         return NULL;
1530 }
1531
1532 /* Mark the chunk for the least recently used algorithym */
1533 static void yaffs_use_cache(struct yaffs_dev *dev, struct yaffs_cache *cache,
1534                             int is_write)
1535 {
1536         int i;
1537
1538         if (dev->param.n_caches < 1)
1539                 return;
1540
1541         if (dev->cache_last_use < 0 ||
1542                 dev->cache_last_use > 100000000) {
1543                 /* Reset the cache usages */
1544                 for (i = 1; i < dev->param.n_caches; i++)
1545                         dev->cache[i].last_use = 0;
1546
1547                 dev->cache_last_use = 0;
1548         }
1549         dev->cache_last_use++;
1550         cache->last_use = dev->cache_last_use;
1551
1552         if (is_write)
1553                 cache->dirty = 1;
1554 }
1555
1556 /* Invalidate a single cache page.
1557  * Do this when a whole page gets written,
1558  * ie the short cache for this page is no longer valid.
1559  */
1560 static void yaffs_invalidate_chunk_cache(struct yaffs_obj *object, int chunk_id)
1561 {
1562         struct yaffs_cache *cache;
1563
1564         if (object->my_dev->param.n_caches > 0) {
1565                 cache = yaffs_find_chunk_cache(object, chunk_id);
1566
1567                 if (cache)
1568                         cache->object = NULL;
1569         }
1570 }
1571
1572 /* Invalidate all the cache pages associated with this object
 * Do this whenever the file is deleted or resized.
1574  */
1575 static void yaffs_invalidate_whole_cache(struct yaffs_obj *in)
1576 {
1577         int i;
1578         struct yaffs_dev *dev = in->my_dev;
1579
1580         if (dev->param.n_caches > 0) {
1581                 /* Invalidate it. */
1582                 for (i = 0; i < dev->param.n_caches; i++) {
1583                         if (dev->cache[i].object == in)
1584                                 dev->cache[i].object = NULL;
1585                 }
1586         }
1587 }
1588
1589 static void yaffs_unhash_obj(struct yaffs_obj *obj)
1590 {
1591         int bucket;
1592         struct yaffs_dev *dev = obj->my_dev;
1593
1594         /* If it is still linked into the bucket list, free from the list */
1595         if (!list_empty(&obj->hash_link)) {
1596                 list_del_init(&obj->hash_link);
1597                 bucket = yaffs_hash_fn(obj->obj_id);
1598                 dev->obj_bucket[bucket].count--;
1599         }
1600 }
1601
1602 /*  FreeObject frees up a Object and puts it back on the free list */
1603 static void yaffs_free_obj(struct yaffs_obj *obj)
1604 {
1605         struct yaffs_dev *dev;
1606
1607         if (!obj) {
1608                 BUG();
1609                 return;
1610         }
1611         dev = obj->my_dev;
1612         yaffs_trace(YAFFS_TRACE_OS, "FreeObject %p inode %p",
1613                 obj, obj->my_inode);
1614         if (obj->parent)
1615                 BUG();
1616         if (!list_empty(&obj->siblings))
1617                 BUG();
1618
1619         if (obj->my_inode) {
1620                 /* We're still hooked up to a cached inode.
1621                  * Don't delete now, but mark for later deletion
1622                  */
1623                 obj->defered_free = 1;
1624                 return;
1625         }
1626
1627         yaffs_unhash_obj(obj);
1628
1629         yaffs_free_raw_obj(dev, obj);
1630         dev->n_obj--;
1631         dev->checkpoint_blocks_required = 0;    /* force recalculation */
1632 }
1633
1634 void yaffs_handle_defered_free(struct yaffs_obj *obj)
1635 {
1636         if (obj->defered_free)
1637                 yaffs_free_obj(obj);
1638 }
1639
/* Common final deletion path for any object: drop its cached data, record
 * the deletion (yaffs2), detach from the tree, scrub the header chunk and
 * release the object.  Always returns YAFFS_OK.
 */
static int yaffs_generic_obj_del(struct yaffs_obj *in)
{
	/* Invalidate the file's data in the cache, without flushing. */
	yaffs_invalidate_whole_cache(in);

	if (in->my_dev->param.is_yaffs2 && in->parent != in->my_dev->del_dir) {
		/* Move to unlinked directory so we have a deletion record */
		yaffs_change_obj_name(in, in->my_dev->del_dir, _Y("deleted"), 0,
				      0);
	}

	/* Detach from the directory tree and delete the object header. */
	yaffs_remove_obj_from_dir(in);
	yaffs_chunk_del(in->my_dev, in->hdr_chunk, 1, __LINE__);
	in->hdr_chunk = 0;

	yaffs_free_obj(in);
	return YAFFS_OK;

}
1659
1660 static void yaffs_soft_del_file(struct yaffs_obj *obj)
1661 {
1662         if (!obj->deleted ||
1663             obj->variant_type != YAFFS_OBJECT_TYPE_FILE ||
1664             obj->soft_del)
1665                 return;
1666
1667         if (obj->n_data_chunks <= 0) {
1668                 /* Empty file with no duplicate object headers,
1669                  * just delete it immediately */
1670                 yaffs_free_tnode(obj->my_dev, obj->variant.file_variant.top);
1671                 obj->variant.file_variant.top = NULL;
1672                 yaffs_trace(YAFFS_TRACE_TRACING,
1673                         "yaffs: Deleting empty file %d",
1674                         obj->obj_id);
1675                 yaffs_generic_obj_del(obj);
1676         } else {
1677                 yaffs_soft_del_worker(obj,
1678                                       obj->variant.file_variant.top,
1679                                       obj->variant.
1680                                       file_variant.top_level, 0);
1681                 obj->soft_del = 1;
1682         }
1683 }
1684
1685 /* Pruning removes any part of the file structure tree that is beyond the
1686  * bounds of the file (ie that does not point to chunks).
1687  *
1688  * A file should only get pruned when its size is reduced.
1689  *
1690  * Before pruning, the chunks must be pulled from the tree and the
1691  * level 0 tnode entries must be zeroed out.
1692  * Could also use this for file deletion, but that's probably better handled
1693  * by a special case.
1694  *
1695  * This function is recursive. For levels > 0 the function is called again on
1696  * any sub-tree. For level == 0 we just check if the sub-tree has data.
1697  * If there is no data in a subtree then it is pruned.
1698  */
1699
static struct yaffs_tnode *yaffs_prune_worker(struct yaffs_dev *dev,
					      struct yaffs_tnode *tn, u32 level,
					      int del0)
{
	int i;
	int has_data;

	if (!tn)
		return tn;

	has_data = 0;

	if (level > 0) {
		/* Internal tnode: prune each child subtree first, then count
		 * the survivors.  The 0th child may only be freed when the
		 * caller permits (del0), since it holds the low part of the
		 * address range.
		 */
		for (i = 0; i < YAFFS_NTNODES_INTERNAL; i++) {
			if (tn->internal[i]) {
				tn->internal[i] =
				    yaffs_prune_worker(dev,
						tn->internal[i],
						level - 1,
						(i == 0) ? del0 : 1);
			}

			if (tn->internal[i])
				has_data++;
		}
	} else {
		/* Level 0 tnode: scan its raw storage words for any
		 * non-zero group entry. */
		int tnode_size_u32 = dev->tnode_size / sizeof(u32);
		u32 *map = (u32 *) tn;

		for (i = 0; !has_data && i < tnode_size_u32; i++) {
			if (map[i])
				has_data++;
		}
	}

	if (has_data == 0 && del0) {
		/* Free and return NULL */
		yaffs_free_tnode(dev, tn);
		tn = NULL;
	}
	return tn;
}
1742
1743 static int yaffs_prune_tree(struct yaffs_dev *dev,
1744                             struct yaffs_file_var *file_struct)
1745 {
1746         int i;
1747         int has_data;
1748         int done = 0;
1749         struct yaffs_tnode *tn;
1750
1751         if (file_struct->top_level < 1)
1752                 return YAFFS_OK;
1753
1754         file_struct->top =
1755            yaffs_prune_worker(dev, file_struct->top, file_struct->top_level, 0);
1756
1757         /* Now we have a tree with all the non-zero branches NULL but
1758          * the height is the same as it was.
1759          * Let's see if we can trim internal tnodes to shorten the tree.
1760          * We can do this if only the 0th element in the tnode is in use
1761          * (ie all the non-zero are NULL)
1762          */
1763
1764         while (file_struct->top_level && !done) {
1765                 tn = file_struct->top;
1766
1767                 has_data = 0;
1768                 for (i = 1; i < YAFFS_NTNODES_INTERNAL; i++) {
1769                         if (tn->internal[i])
1770                                 has_data++;
1771                 }
1772
1773                 if (!has_data) {
1774                         file_struct->top = tn->internal[0];
1775                         file_struct->top_level--;
1776                         yaffs_free_tnode(dev, tn);
1777                 } else {
1778                         done = 1;
1779                 }
1780         }
1781
1782         return YAFFS_OK;
1783 }
1784
1785 /*-------------------- End of File Structure functions.-------------------*/
1786
1787 /* alloc_empty_obj gets us a clean Object.*/
1788 static struct yaffs_obj *yaffs_alloc_empty_obj(struct yaffs_dev *dev)
1789 {
1790         struct yaffs_obj *obj = yaffs_alloc_raw_obj(dev);
1791
1792         if (!obj)
1793                 return obj;
1794
1795         dev->n_obj++;
1796
1797         /* Now sweeten it up... */
1798
1799         memset(obj, 0, sizeof(struct yaffs_obj));
1800         obj->being_created = 1;
1801
1802         obj->my_dev = dev;
1803         obj->hdr_chunk = 0;
1804         obj->variant_type = YAFFS_OBJECT_TYPE_UNKNOWN;
1805         INIT_LIST_HEAD(&(obj->hard_links));
1806         INIT_LIST_HEAD(&(obj->hash_link));
1807         INIT_LIST_HEAD(&obj->siblings);
1808
1809         /* Now make the directory sane */
1810         if (dev->root_dir) {
1811                 obj->parent = dev->root_dir;
1812                 list_add(&(obj->siblings),
1813                          &dev->root_dir->variant.dir_variant.children);
1814         }
1815
1816         /* Add it to the lost and found directory.
1817          * NB Can't put root or lost-n-found in lost-n-found so
1818          * check if lost-n-found exists first
1819          */
1820         if (dev->lost_n_found)
1821                 yaffs_add_obj_to_dir(dev->lost_n_found, obj);
1822
1823         obj->being_created = 0;
1824
1825         dev->checkpoint_blocks_required = 0;    /* force recalculation */
1826
1827         return obj;
1828 }
1829
1830 static int yaffs_find_nice_bucket(struct yaffs_dev *dev)
1831 {
1832         int i;
1833         int l = 999;
1834         int lowest = 999999;
1835
1836         /* Search for the shortest list or one that
1837          * isn't too long.
1838          */
1839
1840         for (i = 0; i < 10 && lowest > 4; i++) {
1841                 dev->bucket_finder++;
1842                 dev->bucket_finder %= YAFFS_NOBJECT_BUCKETS;
1843                 if (dev->obj_bucket[dev->bucket_finder].count < lowest) {
1844                         lowest = dev->obj_bucket[dev->bucket_finder].count;
1845                         l = dev->bucket_finder;
1846                 }
1847         }
1848
1849         return l;
1850 }
1851
1852 static int yaffs_new_obj_id(struct yaffs_dev *dev)
1853 {
1854         int bucket = yaffs_find_nice_bucket(dev);
1855         int found = 0;
1856         struct list_head *i;
1857         u32 n = (u32) bucket;
1858
1859         /* Now find an object value that has not already been taken
1860          * by scanning the list.
1861          */
1862
1863         while (!found) {
1864                 found = 1;
1865                 n += YAFFS_NOBJECT_BUCKETS;
1866                 if (1 || dev->obj_bucket[bucket].count > 0) {
1867                         list_for_each(i, &dev->obj_bucket[bucket].list) {
1868                                 /* If there is already one in the list */
1869                                 if (i && list_entry(i, struct yaffs_obj,
1870                                                     hash_link)->obj_id == n) {
1871                                         found = 0;
1872                                 }
1873                         }
1874                 }
1875         }
1876         return n;
1877 }
1878
1879 static void yaffs_hash_obj(struct yaffs_obj *in)
1880 {
1881         int bucket = yaffs_hash_fn(in->obj_id);
1882         struct yaffs_dev *dev = in->my_dev;
1883
1884         list_add(&in->hash_link, &dev->obj_bucket[bucket].list);
1885         dev->obj_bucket[bucket].count++;
1886 }
1887
1888 struct yaffs_obj *yaffs_find_by_number(struct yaffs_dev *dev, u32 number)
1889 {
1890         int bucket = yaffs_hash_fn(number);
1891         struct list_head *i;
1892         struct yaffs_obj *in;
1893
1894         list_for_each(i, &dev->obj_bucket[bucket].list) {
1895                 /* Look if it is in the list */
1896                 in = list_entry(i, struct yaffs_obj, hash_link);
1897                 if (in->obj_id == number) {
1898                         /* Don't show if it is defered free */
1899                         if (in->defered_free)
1900                                 return NULL;
1901                         return in;
1902                 }
1903         }
1904
1905         return NULL;
1906 }
1907
1908 static struct yaffs_obj *yaffs_new_obj(struct yaffs_dev *dev, int number,
1909                                 enum yaffs_obj_type type)
1910 {
1911         struct yaffs_obj *the_obj = NULL;
1912         struct yaffs_tnode *tn = NULL;
1913
1914         if (number < 0)
1915                 number = yaffs_new_obj_id(dev);
1916
1917         if (type == YAFFS_OBJECT_TYPE_FILE) {
1918                 tn = yaffs_get_tnode(dev);
1919                 if (!tn)
1920                         return NULL;
1921         }
1922
1923         the_obj = yaffs_alloc_empty_obj(dev);
1924         if (!the_obj) {
1925                 if (tn)
1926                         yaffs_free_tnode(dev, tn);
1927                 return NULL;
1928         }
1929
1930         the_obj->fake = 0;
1931         the_obj->rename_allowed = 1;
1932         the_obj->unlink_allowed = 1;
1933         the_obj->obj_id = number;
1934         yaffs_hash_obj(the_obj);
1935         the_obj->variant_type = type;
1936         yaffs_load_current_time(the_obj, 1, 1);
1937
1938         switch (type) {
1939         case YAFFS_OBJECT_TYPE_FILE:
1940                 the_obj->variant.file_variant.file_size = 0;
1941                 the_obj->variant.file_variant.scanned_size = 0;
1942                 the_obj->variant.file_variant.shrink_size =
1943                                                 yaffs_max_file_size(dev);
1944                 the_obj->variant.file_variant.top_level = 0;
1945                 the_obj->variant.file_variant.top = tn;
1946                 break;
1947         case YAFFS_OBJECT_TYPE_DIRECTORY:
1948                 INIT_LIST_HEAD(&the_obj->variant.dir_variant.children);
1949                 INIT_LIST_HEAD(&the_obj->variant.dir_variant.dirty);
1950                 break;
1951         case YAFFS_OBJECT_TYPE_SYMLINK:
1952         case YAFFS_OBJECT_TYPE_HARDLINK:
1953         case YAFFS_OBJECT_TYPE_SPECIAL:
1954                 /* No action required */
1955                 break;
1956         case YAFFS_OBJECT_TYPE_UNKNOWN:
1957                 /* todo this should not happen */
1958                 break;
1959         }
1960         return the_obj;
1961 }
1962
1963 static struct yaffs_obj *yaffs_create_fake_dir(struct yaffs_dev *dev,
1964                                                int number, u32 mode)
1965 {
1966
1967         struct yaffs_obj *obj =
1968             yaffs_new_obj(dev, number, YAFFS_OBJECT_TYPE_DIRECTORY);
1969
1970         if (!obj)
1971                 return NULL;
1972
1973         obj->fake = 1;  /* it is fake so it might not use NAND */
1974         obj->rename_allowed = 0;
1975         obj->unlink_allowed = 0;
1976         obj->deleted = 0;
1977         obj->unlinked = 0;
1978         obj->yst_mode = mode;
1979         obj->my_dev = dev;
1980         obj->hdr_chunk = 0;     /* Not a valid chunk. */
1981         return obj;
1982
1983 }
1984
1985
1986 static void yaffs_init_tnodes_and_objs(struct yaffs_dev *dev)
1987 {
1988         int i;
1989
1990         dev->n_obj = 0;
1991         dev->n_tnodes = 0;
1992         yaffs_init_raw_tnodes_and_objs(dev);
1993
1994         for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) {
1995                 INIT_LIST_HEAD(&dev->obj_bucket[i].list);
1996                 dev->obj_bucket[i].count = 0;
1997         }
1998 }
1999
2000 struct yaffs_obj *yaffs_find_or_create_by_number(struct yaffs_dev *dev,
2001                                                  int number,
2002                                                  enum yaffs_obj_type type)
2003 {
2004         struct yaffs_obj *the_obj = NULL;
2005
2006         if (number > 0)
2007                 the_obj = yaffs_find_by_number(dev, number);
2008
2009         if (!the_obj)
2010                 the_obj = yaffs_new_obj(dev, number, type);
2011
2012         return the_obj;
2013
2014 }
2015
2016 YCHAR *yaffs_clone_str(const YCHAR *str)
2017 {
2018         YCHAR *new_str = NULL;
2019         int len;
2020
2021         if (!str)
2022                 str = _Y("");
2023
2024         len = strnlen(str, YAFFS_MAX_ALIAS_LENGTH);
2025         new_str = kmalloc((len + 1) * sizeof(YCHAR), GFP_NOFS);
2026         if (new_str) {
2027                 strncpy(new_str, str, len);
2028                 new_str[len] = 0;
2029         }
2030         return new_str;
2031
2032 }
/*
 * yaffs_update_parent() handles fixing a directory's mtime and ctime when a
 * new link (ie. name) is created or deleted in the directory.
 *
 * ie.
 *   create dir/a : update dir's mtime/ctime
 *   rm dir/a:   update dir's mtime/ctime
 *   modify dir/a: don't update dir's mtime/ctime
 *
 * This can be handled immediately or deferred. Deferring helps reduce the
 * number of updates when many files in a directory are changed within a
 * brief period.
 *
 * If the directory updating is deferred then yaffs_update_dirty_dirs must be
 * called periodically.
 */
2048
2049 static void yaffs_update_parent(struct yaffs_obj *obj)
2050 {
2051         struct yaffs_dev *dev;
2052
2053         if (!obj)
2054                 return;
2055         dev = obj->my_dev;
2056         obj->dirty = 1;
2057         yaffs_load_current_time(obj, 0, 1);
2058         if (dev->param.defered_dir_update) {
2059                 struct list_head *link = &obj->variant.dir_variant.dirty;
2060
2061                 if (list_empty(link)) {
2062                         list_add(link, &dev->dirty_dirs);
2063                         yaffs_trace(YAFFS_TRACE_BACKGROUND,
2064                           "Added object %d to dirty directories",
2065                            obj->obj_id);
2066                 }
2067
2068         } else {
2069                 yaffs_update_oh(obj, NULL, 0, 0, 0, NULL);
2070         }
2071 }
2072
2073 void yaffs_update_dirty_dirs(struct yaffs_dev *dev)
2074 {
2075         struct list_head *link;
2076         struct yaffs_obj *obj;
2077         struct yaffs_dir_var *d_s;
2078         union yaffs_obj_var *o_v;
2079
2080         yaffs_trace(YAFFS_TRACE_BACKGROUND, "Update dirty directories");
2081
2082         while (!list_empty(&dev->dirty_dirs)) {
2083                 link = dev->dirty_dirs.next;
2084                 list_del_init(link);
2085
2086                 d_s = list_entry(link, struct yaffs_dir_var, dirty);
2087                 o_v = list_entry(d_s, union yaffs_obj_var, dir_variant);
2088                 obj = list_entry(o_v, struct yaffs_obj, variant);
2089
2090                 yaffs_trace(YAFFS_TRACE_BACKGROUND, "Update directory %d",
2091                         obj->obj_id);
2092
2093                 if (obj->dirty)
2094                         yaffs_update_oh(obj, NULL, 0, 0, 0, NULL);
2095         }
2096 }
2097
2098 /*
2099  * Mknod (create) a new object.
2100  * equiv_obj only has meaning for a hard link;
2101  * alias_str only has meaning for a symlink.
2102  * rdev only has meaning for devices (a subset of special objects)
2103  */
2104
2105 static struct yaffs_obj *yaffs_create_obj(enum yaffs_obj_type type,
2106                                           struct yaffs_obj *parent,
2107                                           const YCHAR *name,
2108                                           u32 mode,
2109                                           u32 uid,
2110                                           u32 gid,
2111                                           struct yaffs_obj *equiv_obj,
2112                                           const YCHAR *alias_str, u32 rdev)
2113 {
2114         struct yaffs_obj *in;
2115         YCHAR *str = NULL;
2116         struct yaffs_dev *dev = parent->my_dev;
2117
2118         /* Check if the entry exists.
2119          * If it does then fail the call since we don't want a dup. */
2120         if (yaffs_find_by_name(parent, name))
2121                 return NULL;
2122
2123         if (type == YAFFS_OBJECT_TYPE_SYMLINK) {
2124                 str = yaffs_clone_str(alias_str);
2125                 if (!str)
2126                         return NULL;
2127         }
2128
2129         in = yaffs_new_obj(dev, -1, type);
2130
2131         if (!in) {
2132                 kfree(str);
2133                 return NULL;
2134         }
2135
2136         in->hdr_chunk = 0;
2137         in->valid = 1;
2138         in->variant_type = type;
2139
2140         in->yst_mode = mode;
2141
2142         yaffs_attribs_init(in, gid, uid, rdev);
2143
2144         in->n_data_chunks = 0;
2145
2146         yaffs_set_obj_name(in, name);
2147         in->dirty = 1;
2148
2149         yaffs_add_obj_to_dir(parent, in);
2150
2151         in->my_dev = parent->my_dev;
2152
2153         switch (type) {
2154         case YAFFS_OBJECT_TYPE_SYMLINK:
2155                 in->variant.symlink_variant.alias = str;
2156                 break;
2157         case YAFFS_OBJECT_TYPE_HARDLINK:
2158                 in->variant.hardlink_variant.equiv_obj = equiv_obj;
2159                 in->variant.hardlink_variant.equiv_id = equiv_obj->obj_id;
2160                 list_add(&in->hard_links, &equiv_obj->hard_links);
2161                 break;
2162         case YAFFS_OBJECT_TYPE_FILE:
2163         case YAFFS_OBJECT_TYPE_DIRECTORY:
2164         case YAFFS_OBJECT_TYPE_SPECIAL:
2165         case YAFFS_OBJECT_TYPE_UNKNOWN:
2166                 /* do nothing */
2167                 break;
2168         }
2169
2170         if (yaffs_update_oh(in, name, 0, 0, 0, NULL) < 0) {
2171                 /* Could not create the object header, fail */
2172                 yaffs_del_obj(in);
2173                 in = NULL;
2174         }
2175
2176         if (in)
2177                 yaffs_update_parent(parent);
2178
2179         return in;
2180 }
2181
2182 struct yaffs_obj *yaffs_create_file(struct yaffs_obj *parent,
2183                                     const YCHAR *name, u32 mode, u32 uid,
2184                                     u32 gid)
2185 {
2186         return yaffs_create_obj(YAFFS_OBJECT_TYPE_FILE, parent, name, mode,
2187                                 uid, gid, NULL, NULL, 0);
2188 }
2189
2190 struct yaffs_obj *yaffs_create_dir(struct yaffs_obj *parent, const YCHAR *name,
2191                                    u32 mode, u32 uid, u32 gid)
2192 {
2193         return yaffs_create_obj(YAFFS_OBJECT_TYPE_DIRECTORY, parent, name,
2194                                 mode, uid, gid, NULL, NULL, 0);
2195 }
2196
2197 struct yaffs_obj *yaffs_create_special(struct yaffs_obj *parent,
2198                                        const YCHAR *name, u32 mode, u32 uid,
2199                                        u32 gid, u32 rdev)
2200 {
2201         return yaffs_create_obj(YAFFS_OBJECT_TYPE_SPECIAL, parent, name, mode,
2202                                 uid, gid, NULL, NULL, rdev);
2203 }
2204
2205 struct yaffs_obj *yaffs_create_symlink(struct yaffs_obj *parent,
2206                                        const YCHAR *name, u32 mode, u32 uid,
2207                                        u32 gid, const YCHAR *alias)
2208 {
2209         return yaffs_create_obj(YAFFS_OBJECT_TYPE_SYMLINK, parent, name, mode,
2210                                 uid, gid, NULL, alias, 0);
2211 }
2212
2213 /* yaffs_link_obj returns the object id of the equivalent object.*/
2214 struct yaffs_obj *yaffs_link_obj(struct yaffs_obj *parent, const YCHAR * name,
2215                                  struct yaffs_obj *equiv_obj)
2216 {
2217         /* Get the real object in case we were fed a hard link obj */
2218         equiv_obj = yaffs_get_equivalent_obj(equiv_obj);
2219
2220         if (yaffs_create_obj(YAFFS_OBJECT_TYPE_HARDLINK,
2221                         parent, name, 0, 0, 0,
2222                         equiv_obj, NULL, 0))
2223                 return equiv_obj;
2224
2225         return NULL;
2226
2227 }
2228
2229
2230
2231 /*---------------------- Block Management and Page Allocation -------------*/
2232
2233 static void yaffs_deinit_blocks(struct yaffs_dev *dev)
2234 {
2235         if (dev->block_info_alt && dev->block_info)
2236                 vfree(dev->block_info);
2237         else
2238                 kfree(dev->block_info);
2239
2240         dev->block_info_alt = 0;
2241
2242         dev->block_info = NULL;
2243
2244         if (dev->chunk_bits_alt && dev->chunk_bits)
2245                 vfree(dev->chunk_bits);
2246         else
2247                 kfree(dev->chunk_bits);
2248         dev->chunk_bits_alt = 0;
2249         dev->chunk_bits = NULL;
2250 }
2251
2252 static int yaffs_init_blocks(struct yaffs_dev *dev)
2253 {
2254         int n_blocks = dev->internal_end_block - dev->internal_start_block + 1;
2255
2256         dev->block_info = NULL;
2257         dev->chunk_bits = NULL;
2258         dev->alloc_block = -1;  /* force it to get a new one */
2259
2260         /* If the first allocation strategy fails, thry the alternate one */
2261         dev->block_info =
2262                 kmalloc(n_blocks * sizeof(struct yaffs_block_info), GFP_NOFS);
2263         if (!dev->block_info) {
2264                 dev->block_info =
2265                     vmalloc(n_blocks * sizeof(struct yaffs_block_info));
2266                 dev->block_info_alt = 1;
2267         } else {
2268                 dev->block_info_alt = 0;
2269         }
2270
2271         if (!dev->block_info)
2272                 goto alloc_error;
2273
2274         /* Set up dynamic blockinfo stuff. Round up bytes. */
2275         dev->chunk_bit_stride = (dev->param.chunks_per_block + 7) / 8;
2276         dev->chunk_bits =
2277                 kmalloc(dev->chunk_bit_stride * n_blocks, GFP_NOFS);
2278         if (!dev->chunk_bits) {
2279                 dev->chunk_bits =
2280                     vmalloc(dev->chunk_bit_stride * n_blocks);
2281                 dev->chunk_bits_alt = 1;
2282         } else {
2283                 dev->chunk_bits_alt = 0;
2284         }
2285         if (!dev->chunk_bits)
2286                 goto alloc_error;
2287
2288
2289         memset(dev->block_info, 0, n_blocks * sizeof(struct yaffs_block_info));
2290         memset(dev->chunk_bits, 0, dev->chunk_bit_stride * n_blocks);
2291         return YAFFS_OK;
2292
2293 alloc_error:
2294         yaffs_deinit_blocks(dev);
2295         return YAFFS_FAIL;
2296 }
2297
2298
/* Called when a block no longer holds any live data.
 * A healthy block is erased and returned to the EMPTY pool; a block
 * flagged needs_retiring, or one that fails erasure, is retired
 * instead. GC bookkeeping that referenced this block is also cleared.
 */
void yaffs_block_became_dirty(struct yaffs_dev *dev, int block_no)
{
	struct yaffs_block_info *bi = yaffs_get_block_info(dev, block_no);
	int erased_ok = 0;
	int i;

	/* If the block is still healthy erase it and mark as clean.
	 * If the block has had a data failure, then retire it.
	 */

	yaffs_trace(YAFFS_TRACE_GC | YAFFS_TRACE_ERASE,
		"yaffs_block_became_dirty block %d state %d %s",
		block_no, bi->block_state,
		(bi->needs_retiring) ? "needs retiring" : "");

	yaffs2_clear_oldest_dirty_seq(dev, bi);

	bi->block_state = YAFFS_BLOCK_STATE_DIRTY;

	/* If this is the block being garbage collected then stop gc'ing */
	if (block_no == dev->gc_block)
		dev->gc_block = 0;

	/* If this block is currently the best candidate for gc
	 * then drop as a candidate */
	if (block_no == dev->gc_dirtiest) {
		dev->gc_dirtiest = 0;
		dev->gc_pages_in_use = 0;
	}

	if (!bi->needs_retiring) {
		/* Erasing changes the flash layout, so any existing
		 * checkpoint is no longer valid.
		 */
		yaffs2_checkpt_invalidate(dev);
		erased_ok = yaffs_erase_block(dev, block_no);
		if (!erased_ok) {
			dev->n_erase_failures++;
			yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
			  "**>> Erasure failed %d", block_no);
		}
	}

	/* Verify erasure if needed */
	if (erased_ok &&
	    ((yaffs_trace_mask & YAFFS_TRACE_ERASE) ||
	     !yaffs_skip_verification(dev))) {
		for (i = 0; i < dev->param.chunks_per_block; i++) {
			if (!yaffs_check_chunk_erased(dev,
				block_no * dev->param.chunks_per_block + i)) {
				yaffs_trace(YAFFS_TRACE_ERROR,
					">>Block %d erasure supposedly OK, but chunk %d not erased",
					block_no, i);
			}
		}
	}

	if (!erased_ok) {
		/* We lost a block of free space */
		dev->n_free_chunks -= dev->param.chunks_per_block;
		yaffs_retire_block(dev, block_no);
		yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
			"**>> Block %d retired", block_no);
		return;
	}

	/* Clean it up: reset the per-block accounting and hand the
	 * block back to the erased pool.
	 */
	bi->block_state = YAFFS_BLOCK_STATE_EMPTY;
	bi->seq_number = 0;
	dev->n_erased_blocks++;
	bi->pages_in_use = 0;
	bi->soft_del_pages = 0;
	bi->has_shrink_hdr = 0;
	bi->skip_erased_check = 1;	/* Clean, so no need to check */
	bi->gc_prioritise = 0;
	bi->has_summary = 0;

	yaffs_clear_chunk_bits(dev, block_no);

	yaffs_trace(YAFFS_TRACE_ERASE, "Erased block %d", block_no);
}
2377
/* Garbage-collect one live chunk from the block being collected:
 * read the chunk and its tags, then either drop it (a data chunk of a
 * soft-deleted file) or copy it to a new location and fix up the owning
 * object's header pointer / tnode tree. Finally the old chunk is
 * deleted. Returns YAFFS_OK, or YAFFS_FAIL if the copy write failed.
 * buffer must be a chunk-sized scratch buffer.
 */
static inline int yaffs_gc_process_chunk(struct yaffs_dev *dev,
					struct yaffs_block_info *bi,
					int old_chunk, u8 *buffer)
{
	int new_chunk;
	int mark_flash = 1;
	struct yaffs_ext_tags tags;
	struct yaffs_obj *object;
	int matching_chunk;
	int ret_val = YAFFS_OK;

	memset(&tags, 0, sizeof(tags));
	yaffs_rd_chunk_tags_nand(dev, old_chunk,
				 buffer, &tags);
	object = yaffs_find_by_number(dev, tags.obj_id);

	yaffs_trace(YAFFS_TRACE_GC_DETAIL,
		"Collecting chunk in block %d, %d %d %d ",
		dev->gc_chunk, tags.obj_id,
		tags.chunk_id, tags.n_bytes);

	/* Sanity check (when verification is enabled): the chunk's tags
	 * should agree with where the owning object thinks it lives.
	 */
	if (object && !yaffs_skip_verification(dev)) {
		if (tags.chunk_id == 0)
			matching_chunk =
			    object->hdr_chunk;
		else if (object->soft_del)
			/* Defeat the test */
			matching_chunk = old_chunk;
		else
			matching_chunk =
			    yaffs_find_chunk_in_file
			    (object, tags.chunk_id,
			     NULL);

		if (old_chunk != matching_chunk)
			yaffs_trace(YAFFS_TRACE_ERROR,
				"gc: page in gc mismatch: %d %d %d %d",
				old_chunk,
				matching_chunk,
				tags.obj_id,
				tags.chunk_id);
	}

	if (!object) {
		yaffs_trace(YAFFS_TRACE_ERROR,
			"page %d in gc has no object: %d %d %d ",
			old_chunk,
			tags.obj_id, tags.chunk_id,
			tags.n_bytes);
	}

	if (object &&
	    object->deleted &&
	    object->soft_del && tags.chunk_id != 0) {
		/* Data chunk in a soft deleted file,
		 * throw it away.
		 * It's a soft deleted data chunk,
		 * No need to copy this, just forget
		 * about it and fix up the object.
		 */

		/* Free chunks already includes
		 * softdeleted chunks, however this
		 * chunk is going to soon be really
		 * deleted which will increment free
		 * chunks. We have to decrement free
		 * chunks so this works out properly.
		 */
		dev->n_free_chunks--;
		bi->soft_del_pages--;

		object->n_data_chunks--;
		if (object->n_data_chunks <= 0) {
			/* remember to clean up obj */
			dev->gc_cleanup_list[dev->n_clean_ups] = tags.obj_id;
			dev->n_clean_ups++;
		}
		mark_flash = 0;
	} else if (object) {
		/* It's either a data chunk in a live
		 * file or an ObjectHeader, so we're
		 * interested in it.
		 * NB Need to keep the ObjectHeaders of
		 * deleted files until the whole file
		 * has been deleted off
		 */
		tags.serial_number++;
		dev->n_gc_copies++;

		if (tags.chunk_id == 0) {
			/* It is an object Id,
			 * We need to nuke the
			 * shrinkheader flags since its
			 * work is done.
			 * Also need to clean up
			 * shadowing.
			 */
			struct yaffs_obj_hdr *oh;
			oh = (struct yaffs_obj_hdr *) buffer;

			oh->is_shrink = 0;
			tags.extra_is_shrink = 0;
			oh->shadows_obj = 0;
			oh->inband_shadowed_obj_id = 0;
			tags.extra_shadows = 0;

			/* Update file size */
			if (object->variant_type == YAFFS_OBJECT_TYPE_FILE) {
				yaffs_oh_size_load(oh,
				    object->variant.file_variant.file_size);
				tags.extra_file_size =
				    object->variant.file_variant.file_size;
			}

			yaffs_verify_oh(object, oh, &tags, 1);
			new_chunk =
			    yaffs_write_new_chunk(dev, (u8 *) oh, &tags, 1);
		} else {
			new_chunk =
			    yaffs_write_new_chunk(dev, buffer, &tags, 1);
		}

		if (new_chunk < 0) {
			ret_val = YAFFS_FAIL;
		} else {

			/* Now fix up the Tnodes etc. */

			if (tags.chunk_id == 0) {
				/* It's a header */
				object->hdr_chunk = new_chunk;
				object->serial = tags.serial_number;
			} else {
				/* It's a data chunk */
				yaffs_put_chunk_in_file(object, tags.chunk_id,
							new_chunk, 0);
			}
		}
	}
	/* The old copy is no longer needed (the copy succeeded, or the
	 * chunk was dropped); delete it from the block.
	 */
	if (ret_val == YAFFS_OK)
		yaffs_chunk_del(dev, old_chunk, mark_flash, __LINE__);
	return ret_val;
}
2521
/*
 * yaffs_gc_block: Garbage collect (some or all of) one block.
 *
 * Copies the still-in-use chunks out of the block so that the block can
 * become dirty and be erased. If whole_block is set, the whole block is
 * processed in this call; otherwise at most a few chunks are copied and
 * the position is remembered in dev->gc_chunk for the next call.
 *
 * Returns YAFFS_OK, or the failing result from yaffs_gc_process_chunk().
 */
static int yaffs_gc_block(struct yaffs_dev *dev, int block, int whole_block)
{
	int old_chunk;
	int ret_val = YAFFS_OK;
	int i;
	int is_checkpt_block;
	int max_copies;
	int chunks_before = yaffs_get_erased_chunks(dev);
	int chunks_after;
	struct yaffs_block_info *bi = yaffs_get_block_info(dev, block);

	is_checkpt_block = (bi->block_state == YAFFS_BLOCK_STATE_CHECKPOINT);

	yaffs_trace(YAFFS_TRACE_TRACING,
		"Collecting block %d, in use %d, shrink %d, whole_block %d",
		block, bi->pages_in_use, bi->has_shrink_hdr,
		whole_block);

	/*yaffs_verify_free_chunks(dev); */

	if (bi->block_state == YAFFS_BLOCK_STATE_FULL)
		bi->block_state = YAFFS_BLOCK_STATE_COLLECTING;

	bi->has_shrink_hdr = 0; /* clear the flag so that the block can erase */

	/* Prevent recursive gc while this block is being collected */
	dev->gc_disable = 1;

	yaffs_summary_gc(dev, block);

	if (is_checkpt_block || !yaffs_still_some_chunks(dev, block)) {
		/* Nothing needs copying; the block can go dirty directly. */
		yaffs_trace(YAFFS_TRACE_TRACING,
			"Collecting block %d that has no chunks in use",
			block);
		yaffs_block_became_dirty(dev, block);
	} else {

		u8 *buffer = yaffs_get_temp_buffer(dev);

		yaffs_verify_blk(dev, bi, block);

		/* Whole-block gc copies everything in one pass; partial gc
		 * copies at most 5 in-use chunks per call. */
		max_copies = (whole_block) ? dev->param.chunks_per_block : 5;
		old_chunk = block * dev->param.chunks_per_block + dev->gc_chunk;

		for (/* init already done */ ;
		     ret_val == YAFFS_OK &&
		     dev->gc_chunk < dev->param.chunks_per_block &&
		     (bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) &&
		     max_copies > 0;
		     dev->gc_chunk++, old_chunk++) {
			if (yaffs_check_chunk_bit(dev, block, dev->gc_chunk)) {
				/* Page is in use and might need to be copied */
				max_copies--;
				ret_val = yaffs_gc_process_chunk(dev, bi,
							old_chunk, buffer);
			}
		}
		yaffs_release_temp_buffer(dev, buffer);
	}

	yaffs_verify_collected_blk(dev, bi, block);

	if (bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) {
		/*
		 * The gc did not complete. Set block state back to FULL
		 * because checkpointing does not restore gc.
		 */
		bi->block_state = YAFFS_BLOCK_STATE_FULL;
	} else {
		/* The gc completed. */
		/* Do any required cleanups: finally delete the files whose
		 * ids were queued in gc_cleanup_list during the copy pass. */
		for (i = 0; i < dev->n_clean_ups; i++) {
			/* Time to delete the file too */
			struct yaffs_obj *object =
			    yaffs_find_by_number(dev, dev->gc_cleanup_list[i]);
			if (object) {
				yaffs_free_tnode(dev,
					  object->variant.file_variant.top);
				object->variant.file_variant.top = NULL;
				yaffs_trace(YAFFS_TRACE_GC,
					"yaffs: About to finally delete object %d",
					object->obj_id);
				yaffs_generic_obj_del(object);
				object->my_dev->n_deleted_files--;
			}

		}
		chunks_after = yaffs_get_erased_chunks(dev);
		if (chunks_before >= chunks_after)
			yaffs_trace(YAFFS_TRACE_GC,
				"gc did not increase free chunks before %d after %d",
				chunks_before, chunks_after);
		/* Reset gc position so the next gc picks a fresh block. */
		dev->gc_block = 0;
		dev->gc_chunk = 0;
		dev->n_clean_ups = 0;
	}

	dev->gc_disable = 0;

	return ret_val;
}
2622
2623 /*
2624  * find_gc_block() selects the dirtiest block (or close enough)
2625  * for garbage collection.
2626  */
2627
static unsigned yaffs_find_gc_block(struct yaffs_dev *dev,
				    int aggressive, int background)
{
	int i;
	int iterations;
	unsigned selected = 0;
	int prioritised = 0;
	int prioritised_exist = 0;
	struct yaffs_block_info *bi;
	int threshold;

	/* First let's see if we need to grab a prioritised block */
	if (dev->has_pending_prioritised_gc && !aggressive) {
		dev->gc_dirtiest = 0;
		bi = dev->block_info;
		for (i = dev->internal_start_block;
		     i <= dev->internal_end_block && !selected; i++) {

			if (bi->gc_prioritise) {
				prioritised_exist = 1;
				if (bi->block_state == YAFFS_BLOCK_STATE_FULL &&
				    yaffs_block_ok_for_gc(dev, bi)) {
					selected = i;
					prioritised = 1;
				}
			}
			bi++;
		}

		/*
		 * If there is a prioritised block and none was selected then
		 * this happened because there is at least one old dirty block
		 * gumming up the works. Let's gc the oldest dirty block.
		 */

		if (prioritised_exist &&
		    !selected && dev->oldest_dirty_block > 0)
			selected = dev->oldest_dirty_block;

		if (!prioritised_exist) /* None found, so we can clear this */
			dev->has_pending_prioritised_gc = 0;
	}

	/* If we're doing aggressive GC then we are happy to take a less-dirty
	 * block, and search harder.
	 * else (leisurely gc), then we only bother to do this if the
	 * block has only a few pages in use.
	 */

	if (!selected) {
		int pages_used;
		int n_blocks =
		    dev->internal_end_block - dev->internal_start_block + 1;
		if (aggressive) {
			/* Aggressive: accept any block, scan the whole array */
			threshold = dev->param.chunks_per_block;
			iterations = n_blocks;
		} else {
			int max_threshold;

			if (background)
				max_threshold = dev->param.chunks_per_block / 2;
			else
				max_threshold = dev->param.chunks_per_block / 8;

			if (max_threshold < YAFFS_GC_PASSIVE_THRESHOLD)
				max_threshold = YAFFS_GC_PASSIVE_THRESHOLD;

			/* Background gc accepts progressively less-dirty
			 * blocks the longer we have gone without collecting,
			 * clamped to [YAFFS_GC_PASSIVE_THRESHOLD,
			 * max_threshold]. */
			threshold = background ? (dev->gc_not_done + 2) * 2 : 0;
			if (threshold < YAFFS_GC_PASSIVE_THRESHOLD)
				threshold = YAFFS_GC_PASSIVE_THRESHOLD;
			if (threshold > max_threshold)
				threshold = max_threshold;

			iterations = n_blocks / 16 + 1;
			if (iterations > 100)
				iterations = 100;
		}

		/* Circular scan resuming at dev->gc_block_finder, tracking
		 * the dirtiest acceptable block seen so far in
		 * dev->gc_dirtiest / dev->gc_pages_in_use. Stops early once
		 * a block is "good enough". */
		for (i = 0;
		     i < iterations &&
		     (dev->gc_dirtiest < 1 ||
		      dev->gc_pages_in_use > YAFFS_GC_GOOD_ENOUGH);
		     i++) {
			dev->gc_block_finder++;
			if (dev->gc_block_finder < dev->internal_start_block ||
			    dev->gc_block_finder > dev->internal_end_block)
				dev->gc_block_finder =
				    dev->internal_start_block;

			bi = yaffs_get_block_info(dev, dev->gc_block_finder);

			pages_used = bi->pages_in_use - bi->soft_del_pages;

			if (bi->block_state == YAFFS_BLOCK_STATE_FULL &&
			    pages_used < dev->param.chunks_per_block &&
			    (dev->gc_dirtiest < 1 ||
			     pages_used < dev->gc_pages_in_use) &&
			    yaffs_block_ok_for_gc(dev, bi)) {
				dev->gc_dirtiest = dev->gc_block_finder;
				dev->gc_pages_in_use = pages_used;
			}
		}

		if (dev->gc_dirtiest > 0 && dev->gc_pages_in_use <= threshold)
			selected = dev->gc_dirtiest;
	}

	/*
	 * If nothing has been selected for a while, try the oldest dirty
	 * because that's gumming up the works.
	 */

	if (!selected && dev->param.is_yaffs2 &&
	    dev->gc_not_done >= (background ? 10 : 20)) {
		yaffs2_find_oldest_dirty_seq(dev);
		if (dev->oldest_dirty_block > 0) {
			selected = dev->oldest_dirty_block;
			dev->gc_dirtiest = selected;
			dev->oldest_dirty_gc_count++;
			bi = yaffs_get_block_info(dev, selected);
			dev->gc_pages_in_use =
			    bi->pages_in_use - bi->soft_del_pages;
		} else {
			dev->gc_not_done = 0;
		}
	}

	if (selected) {
		yaffs_trace(YAFFS_TRACE_GC,
			"GC Selected block %d with %d free, prioritised:%d",
			selected,
			dev->param.chunks_per_block - dev->gc_pages_in_use,
			prioritised);

		dev->n_gc_blocks++;
		if (background)
			dev->bg_gcs++;

		/* Reset the search state for the next selection. */
		dev->gc_dirtiest = 0;
		dev->gc_pages_in_use = 0;
		dev->gc_not_done = 0;
		if (dev->refresh_skip > 0)
			dev->refresh_skip--;
	} else {
		dev->gc_not_done++;
		yaffs_trace(YAFFS_TRACE_GC,
			"GC none: finder %d skip %d threshold %d dirtiest %d using %d oldest %d%s",
			dev->gc_block_finder, dev->gc_not_done, threshold,
			dev->gc_dirtiest, dev->gc_pages_in_use,
			dev->oldest_dirty_block, background ? " bg" : "");
	}

	return selected;
}
2782
/* New garbage collector
 * If we're very low on erased blocks then we do aggressive garbage collection,
 * otherwise we do "leisurely" garbage collection.
 * Aggressive gc looks further (the whole array) and will accept less-dirty
 * blocks. Passive gc only inspects smaller areas and only accepts dirtier
 * blocks.
 *
 * The idea is to help clear out space in a more spread-out manner.
 * It is unclear how much this helps in practice.
 */
static int yaffs_check_gc(struct yaffs_dev *dev, int background)
{
	int aggressive = 0;
	int gc_ok = YAFFS_OK;
	int max_tries = 0;
	int min_erased;
	int erased_chunks;
	int checkpt_block_adjust;

	/* Bit 0 of the optional gc_control callback can veto gc entirely. */
	if (dev->param.gc_control && (dev->param.gc_control(dev) & 1) == 0)
		return YAFFS_OK;

	if (dev->gc_disable)
		/* Bail out so we don't get recursive gc */
		return YAFFS_OK;

	/* This loop should pass the first time.
	 * Only loops here if the collection does not increase space.
	 */

	do {
		max_tries++;

		checkpt_block_adjust = yaffs_calc_checkpt_blocks_required(dev);

		/* Blocks that must stay erased: reserve + checkpoint + 1. */
		min_erased =
		    dev->param.n_reserved_blocks + checkpt_block_adjust + 1;
		erased_chunks =
		    dev->n_erased_blocks * dev->param.chunks_per_block;

		/* If we need a block soon then do aggressive gc. */
		if (dev->n_erased_blocks < min_erased)
			aggressive = 1;
		else {
			if (!background
			    && erased_chunks > (dev->n_free_chunks / 4))
				break;

			if (dev->gc_skip > 20)
				dev->gc_skip = 20;
			if (erased_chunks < dev->n_free_chunks / 2 ||
			    dev->gc_skip < 1 || background)
				aggressive = 0;
			else {
				/* Plenty of space: skip this call and count
				 * down gc_skip before trying again. */
				dev->gc_skip--;
				break;
			}
		}

		dev->gc_skip = 5;

		/* If we don't already have a block being gc'd then see if we
		 * should start another */

		if (dev->gc_block < 1 && !aggressive) {
			dev->gc_block = yaffs2_find_refresh_block(dev);
			dev->gc_chunk = 0;
			dev->n_clean_ups = 0;
		}
		if (dev->gc_block < 1) {
			dev->gc_block =
			    yaffs_find_gc_block(dev, aggressive, background);
			dev->gc_chunk = 0;
			dev->n_clean_ups = 0;
		}

		if (dev->gc_block > 0) {
			dev->all_gcs++;
			if (!aggressive)
				dev->passive_gc_count++;

			yaffs_trace(YAFFS_TRACE_GC,
				"yaffs: GC n_erased_blocks %d aggressive %d",
				dev->n_erased_blocks, aggressive);

			gc_ok = yaffs_gc_block(dev, dev->gc_block, aggressive);
		}

		if (dev->n_erased_blocks < (dev->param.n_reserved_blocks) &&
		    dev->gc_block > 0) {
			yaffs_trace(YAFFS_TRACE_GC,
				"yaffs: GC !!!no reclaim!!! n_erased_blocks %d after try %d block %d",
				dev->n_erased_blocks, max_tries,
				dev->gc_block);
		}
	} while ((dev->n_erased_blocks < dev->param.n_reserved_blocks) &&
		 (dev->gc_block > 0) && (max_tries < 2));

	/* A gc failure only matters when gc was urgently needed. */
	return aggressive ? gc_ok : YAFFS_OK;
}
2882
2883 /*
2884  * yaffs_bg_gc()
2885  * Garbage collects. Intended to be called from a background thread.
2886  * Returns non-zero if at least half the free chunks are erased.
2887  */
2888 int yaffs_bg_gc(struct yaffs_dev *dev, unsigned urgency)
2889 {
2890         int erased_chunks = dev->n_erased_blocks * dev->param.chunks_per_block;
2891
2892         yaffs_trace(YAFFS_TRACE_BACKGROUND, "Background gc %u", urgency);
2893
2894         yaffs_check_gc(dev, 1);
2895         return erased_chunks > dev->n_free_chunks / 2;
2896 }
2897
2898 /*-------------------- Data file manipulation -----------------*/
2899
2900 static int yaffs_rd_data_obj(struct yaffs_obj *in, int inode_chunk, u8 * buffer)
2901 {
2902         int nand_chunk = yaffs_find_chunk_in_file(in, inode_chunk, NULL);
2903
2904         if (nand_chunk >= 0)
2905                 return yaffs_rd_chunk_tags_nand(in->my_dev, nand_chunk,
2906                                                 buffer, NULL);
2907         else {
2908                 yaffs_trace(YAFFS_TRACE_NANDACCESS,
2909                         "Chunk %d not found zero instead",
2910                         nand_chunk);
2911                 /* get sane (zero) data if you read a hole */
2912                 memset(buffer, 0, in->my_dev->data_bytes_per_chunk);
2913                 return 0;
2914         }
2915
2916 }
2917
2918 void yaffs_chunk_del(struct yaffs_dev *dev, int chunk_id, int mark_flash,
2919                      int lyn)
2920 {
2921         int block;
2922         int page;
2923         struct yaffs_ext_tags tags;
2924         struct yaffs_block_info *bi;
2925
2926         if (chunk_id <= 0)
2927                 return;
2928
2929         dev->n_deletions++;
2930         block = chunk_id / dev->param.chunks_per_block;
2931         page = chunk_id % dev->param.chunks_per_block;
2932
2933         if (!yaffs_check_chunk_bit(dev, block, page))
2934                 yaffs_trace(YAFFS_TRACE_VERIFY,
2935                         "Deleting invalid chunk %d", chunk_id);
2936
2937         bi = yaffs_get_block_info(dev, block);
2938
2939         yaffs2_update_oldest_dirty_seq(dev, block, bi);
2940
2941         yaffs_trace(YAFFS_TRACE_DELETION,
2942                 "line %d delete of chunk %d",
2943                 lyn, chunk_id);
2944
2945         if (!dev->param.is_yaffs2 && mark_flash &&
2946             bi->block_state != YAFFS_BLOCK_STATE_COLLECTING) {
2947
2948                 memset(&tags, 0, sizeof(tags));
2949                 tags.is_deleted = 1;
2950                 yaffs_wr_chunk_tags_nand(dev, chunk_id, NULL, &tags);
2951                 yaffs_handle_chunk_update(dev, chunk_id, &tags);
2952         } else {
2953                 dev->n_unmarked_deletions++;
2954         }
2955
2956         /* Pull out of the management area.
2957          * If the whole block became dirty, this will kick off an erasure.
2958          */
2959         if (bi->block_state == YAFFS_BLOCK_STATE_ALLOCATING ||
2960             bi->block_state == YAFFS_BLOCK_STATE_FULL ||
2961             bi->block_state == YAFFS_BLOCK_STATE_NEEDS_SCAN ||
2962             bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) {
2963                 dev->n_free_chunks++;
2964                 yaffs_clear_chunk_bit(dev, block, page);
2965                 bi->pages_in_use--;
2966
2967                 if (bi->pages_in_use == 0 &&
2968                     !bi->has_shrink_hdr &&
2969                     bi->block_state != YAFFS_BLOCK_STATE_ALLOCATING &&
2970                     bi->block_state != YAFFS_BLOCK_STATE_NEEDS_SCAN) {
2971                         yaffs_block_became_dirty(dev, block);
2972                 }
2973         }
2974 }
2975
2976 static int yaffs_wr_data_obj(struct yaffs_obj *in, int inode_chunk,
2977                              const u8 *buffer, int n_bytes, int use_reserve)
2978 {
2979         /* Find old chunk Need to do this to get serial number
2980          * Write new one and patch into tree.
2981          * Invalidate old tags.
2982          */
2983
2984         int prev_chunk_id;
2985         struct yaffs_ext_tags prev_tags;
2986         int new_chunk_id;
2987         struct yaffs_ext_tags new_tags;
2988         struct yaffs_dev *dev = in->my_dev;
2989
2990         yaffs_check_gc(dev, 0);
2991
2992         /* Get the previous chunk at this location in the file if it exists.
2993          * If it does not exist then put a zero into the tree. This creates
2994          * the tnode now, rather than later when it is harder to clean up.
2995          */
2996         prev_chunk_id = yaffs_find_chunk_in_file(in, inode_chunk, &prev_tags);
2997         if (prev_chunk_id < 1 &&
2998             !yaffs_put_chunk_in_file(in, inode_chunk, 0, 0))
2999                 return 0;
3000
3001         /* Set up new tags */
3002         memset(&new_tags, 0, sizeof(new_tags));
3003
3004         new_tags.chunk_id = inode_chunk;
3005         new_tags.obj_id = in->obj_id;
3006         new_tags.serial_number =
3007             (prev_chunk_id > 0) ? prev_tags.serial_number + 1 : 1;
3008         new_tags.n_bytes = n_bytes;
3009
3010         if (n_bytes < 1 || n_bytes > dev->param.total_bytes_per_chunk) {
3011                 yaffs_trace(YAFFS_TRACE_ERROR,
3012                   "Writing %d bytes to chunk!!!!!!!!!",
3013                    n_bytes);
3014                 BUG();
3015         }
3016
3017         new_chunk_id =
3018             yaffs_write_new_chunk(dev, buffer, &new_tags, use_reserve);
3019
3020         if (new_chunk_id > 0) {
3021                 yaffs_put_chunk_in_file(in, inode_chunk, new_chunk_id, 0);
3022
3023                 if (prev_chunk_id > 0)
3024                         yaffs_chunk_del(dev, prev_chunk_id, 1, __LINE__);
3025
3026                 yaffs_verify_file_sane(in);
3027         }
3028         return new_chunk_id;
3029
3030 }
3031
3032
3033
3034 static int yaffs_do_xattrib_mod(struct yaffs_obj *obj, int set,
3035                                 const YCHAR *name, const void *value, int size,
3036                                 int flags)
3037 {
3038         struct yaffs_xattr_mod xmod;
3039         int result;
3040
3041         xmod.set = set;
3042         xmod.name = name;
3043         xmod.data = value;
3044         xmod.size = size;
3045         xmod.flags = flags;
3046         xmod.result = -ENOSPC;
3047
3048         result = yaffs_update_oh(obj, NULL, 0, 0, 0, &xmod);
3049
3050         if (result > 0)
3051                 return xmod.result;
3052         else
3053                 return -ENOSPC;
3054 }
3055
3056 static int yaffs_apply_xattrib_mod(struct yaffs_obj *obj, char *buffer,
3057                                    struct yaffs_xattr_mod *xmod)
3058 {
3059         int retval = 0;
3060         int x_offs = sizeof(struct yaffs_obj_hdr);
3061         struct yaffs_dev *dev = obj->my_dev;
3062         int x_size = dev->data_bytes_per_chunk - sizeof(struct yaffs_obj_hdr);
3063         char *x_buffer = buffer + x_offs;
3064
3065         if (xmod->set)
3066                 retval =
3067                     nval_set(x_buffer, x_size, xmod->name, xmod->data,
3068                              xmod->size, xmod->flags);
3069         else
3070                 retval = nval_del(x_buffer, x_size, xmod->name);
3071
3072         obj->has_xattr = nval_hasvalues(x_buffer, x_size);
3073         obj->xattr_known = 1;
3074         xmod->result = retval;
3075
3076         return retval;
3077 }
3078
3079 static int yaffs_do_xattrib_fetch(struct yaffs_obj *obj, const YCHAR *name,
3080                                   void *value, int size)
3081 {
3082         char *buffer = NULL;
3083         int result;
3084         struct yaffs_ext_tags tags;
3085         struct yaffs_dev *dev = obj->my_dev;
3086         int x_offs = sizeof(struct yaffs_obj_hdr);
3087         int x_size = dev->data_bytes_per_chunk - sizeof(struct yaffs_obj_hdr);
3088         char *x_buffer;
3089         int retval = 0;
3090
3091         if (obj->hdr_chunk < 1)
3092                 return -ENODATA;
3093
3094         /* If we know that the object has no xattribs then don't do all the
3095          * reading and parsing.
3096          */
3097         if (obj->xattr_known && !obj->has_xattr) {
3098                 if (name)
3099                         return -ENODATA;
3100                 else
3101                         return 0;
3102         }
3103
3104         buffer = (char *)yaffs_get_temp_buffer(dev);
3105         if (!buffer)
3106                 return -ENOMEM;
3107
3108         result =
3109             yaffs_rd_chunk_tags_nand(dev, obj->hdr_chunk, (u8 *) buffer, &tags);
3110
3111         if (result != YAFFS_OK)
3112                 retval = -ENOENT;
3113         else {
3114                 x_buffer = buffer + x_offs;
3115
3116                 if (!obj->xattr_known) {
3117                         obj->has_xattr = nval_hasvalues(x_buffer, x_size);
3118                         obj->xattr_known = 1;
3119                 }
3120
3121                 if (name)
3122                         retval = nval_get(x_buffer, x_size, name, value, size);
3123                 else
3124                         retval = nval_list(x_buffer, x_size, value, size);
3125         }
3126         yaffs_release_temp_buffer(dev, (u8 *) buffer);
3127         return retval;
3128 }
3129
/* Create or replace the xattrib 'name' on an object. */
int yaffs_set_xattrib(struct yaffs_obj *obj, const YCHAR * name,
		      const void *value, int size, int flags)
{
	return yaffs_do_xattrib_mod(obj, 1, name, value, size, flags);
}
3135
/* Delete the xattrib 'name' from an object. */
int yaffs_remove_xattrib(struct yaffs_obj *obj, const YCHAR * name)
{
	return yaffs_do_xattrib_mod(obj, 0, name, NULL, 0, 0);
}
3140
/* Read the value of the xattrib 'name' into 'value' (up to 'size' bytes). */
int yaffs_get_xattrib(struct yaffs_obj *obj, const YCHAR * name, void *value,
		      int size)
{
	return yaffs_do_xattrib_fetch(obj, name, value, size);
}
3146
/* List all xattrib names on an object into 'buffer' (up to 'size' bytes). */
int yaffs_list_xattrib(struct yaffs_obj *obj, char *buffer, int size)
{
	return yaffs_do_xattrib_fetch(obj, NULL, buffer, size);
}
3151
3152 static void yaffs_check_obj_details_loaded(struct yaffs_obj *in)
3153 {
3154         u8 *buf;
3155         struct yaffs_obj_hdr *oh;
3156         struct yaffs_dev *dev;
3157         struct yaffs_ext_tags tags;
3158         int result;
3159         int alloc_failed = 0;
3160
3161         if (!in || !in->lazy_loaded || in->hdr_chunk < 1)
3162                 return;
3163
3164         dev = in->my_dev;
3165         in->lazy_loaded = 0;
3166         buf = yaffs_get_temp_buffer(dev);
3167
3168         result = yaffs_rd_chunk_tags_nand(dev, in->hdr_chunk, buf, &tags);
3169         oh = (struct yaffs_obj_hdr *)buf;
3170
3171         in->yst_mode = oh->yst_mode;
3172         yaffs_load_attribs(in, oh);
3173         yaffs_set_obj_name_from_oh(in, oh);
3174
3175         if (in->variant_type == YAFFS_OBJECT_TYPE_SYMLINK) {
3176                 in->variant.symlink_variant.alias =
3177                     yaffs_clone_str(oh->alias);
3178                 if (!in->variant.symlink_variant.alias)
3179                         alloc_failed = 1;       /* Not returned */
3180         }
3181         yaffs_release_temp_buffer(dev, buf);
3182 }
3183
/*
 * Copy a name out of an object header into a caller-supplied buffer.
 * With auto_unicode, a header name starting with a non-zero byte is
 * stored as ASCII and is widened to YCHAR here; a leading zero marks a
 * unicode name stored from the second YCHAR onward.
 *
 * NOTE(review): neither the ASCII-widening loop nor strncpy() writes a
 * terminating NUL here — presumably callers pre-zero the destination
 * buffer; confirm against the call sites.
 */
static void yaffs_load_name_from_oh(struct yaffs_dev *dev, YCHAR *name,
				    const YCHAR *oh_name, int buff_size)
{
#ifdef CONFIG_YAFFS_AUTO_UNICODE
	if (dev->param.auto_unicode) {
		if (*oh_name) {
			/* It is an ASCII name, do an ASCII to
			 * unicode conversion */
			const char *ascii_oh_name = (const char *)oh_name;
			int n = buff_size - 1;
			while (n > 0 && *ascii_oh_name) {
				*name = *ascii_oh_name;
				name++;
				ascii_oh_name++;
				n--;
			}
		} else {
			/* Unicode name stored from the second YCHAR */
			strncpy(name, oh_name + 1, buff_size - 1);
		}
	} else {
#else
	(void) dev;
	{
#endif
		strncpy(name, oh_name, buff_size - 1);
	}
}
3211
3212 static void yaffs_load_oh_from_name(struct yaffs_dev *dev, YCHAR *oh_name,
3213                                     const YCHAR *name)
3214 {
3215 #ifdef CONFIG_YAFFS_AUTO_UNICODE
3216
3217         int is_ascii;
3218         YCHAR *w;
3219
3220         if (dev->param.auto_unicode) {
3221
3222                 is_ascii = 1;
3223                 w = name;
3224
3225                 /* Figure out if the name will fit in ascii character set */
3226                 while (is_ascii && *w) {
3227                         if ((*w) & 0xff00)
3228                                 is_ascii = 0;
3229                         w++;
3230                 }
3231
3232                 if (is_ascii) {
3233                         /* It is an ASCII name, so convert unicode to ascii */
3234                         char *ascii_oh_name = (char *)oh_name;
3235                         int n = YAFFS_MAX_NAME_LENGTH - 1;
3236                         while (n > 0 && *name) {
3237                                 *ascii_oh_name = *name;
3238                                 name++;
3239                                 ascii_oh_name++;
3240                                 n--;
3241                         }
3242                 } else {
3243                         /* Unicode name, so save starting at the second YCHAR */
3244                         *oh_name = 0;
3245                         strncpy(oh_name + 1, name, YAFFS_MAX_NAME_LENGTH - 2);
3246                 }
3247         } else {
3248 #else
3249         dev = dev;
3250         {
3251 #endif
3252                 strncpy(oh_name, name, YAFFS_MAX_NAME_LENGTH - 1);
3253         }
3254 }
3255
/* yaffs_update_oh updates the object header on NAND for an object.
 * If name is not NULL, then that new name is used.
 */
3259 int yaffs_update_oh(struct yaffs_obj *in, const YCHAR *name, int force,
3260                     int is_shrink, int shadows, struct yaffs_xattr_mod *xmod)
3261 {
3262
3263         struct yaffs_block_info *bi;
3264         struct yaffs_dev *dev = in->my_dev;
3265         int prev_chunk_id;
3266         int ret_val = 0;
3267         int result = 0;
3268         int new_chunk_id;
3269         struct yaffs_ext_tags new_tags;
3270         struct yaffs_ext_tags old_tags;
3271         const YCHAR *alias = NULL;
3272         u8 *buffer = NULL;
3273         YCHAR old_name[YAFFS_MAX_NAME_LENGTH + 1];
3274         struct yaffs_obj_hdr *oh = NULL;
3275         loff_t file_size = 0;
3276
3277         strcpy(old_name, _Y("silly old name"));
3278
3279         if (in->fake && in != dev->root_dir && !force && !xmod)
3280                 return ret_val;
3281
3282         yaffs_check_gc(dev, 0);
3283         yaffs_check_obj_details_loaded(in);
3284
3285         buffer = yaffs_get_temp_buffer(in->my_dev);
3286         oh = (struct yaffs_obj_hdr *)buffer;
3287
3288         prev_chunk_id = in->hdr_chunk;
3289
3290         if (prev_chunk_id > 0) {
3291                 result = yaffs_rd_chunk_tags_nand(dev, prev_chunk_id,
3292                                                   buffer, &old_tags);
3293
3294                 yaffs_verify_oh(in, oh, &old_tags, 0);
3295                 memcpy(old_name, oh->name, sizeof(oh->name));
3296                 memset(buffer, 0xff, sizeof(struct yaffs_obj_hdr));
3297         } else {
3298                 memset(buffer, 0xff, dev->data_bytes_per_chunk);
3299         }
3300
3301         oh->type = in->variant_type;
3302         oh->yst_mode = in->yst_mode;
3303         oh->shadows_obj = oh->inband_shadowed_obj_id = shadows;
3304
3305         yaffs_load_attribs_oh(oh, in);
3306
3307         if (in->parent)
3308                 oh->parent_obj_id = in->parent->obj_id;
3309         else
3310                 oh->parent_obj_id = 0;
3311
3312         if (name && *name) {
3313                 memset(oh->name, 0, sizeof(oh->name));
3314                 yaffs_load_oh_from_name(dev, oh->name, name);
3315         } else if (prev_chunk_id > 0) {
3316                 memcpy(oh->name, old_name, sizeof(oh->name));
3317         } else {
3318                 memset(oh->name, 0, sizeof(oh->name));
3319         }
3320
3321         oh->is_shrink = is_shrink;
3322
3323         switch (in->variant_type) {
3324         case YAFFS_OBJECT_TYPE_UNKNOWN:
3325                 /* Should not happen */
3326                 break;
3327         case YAFFS_OBJECT_TYPE_FILE:
3328                 if (oh->parent_obj_id != YAFFS_OBJECTID_DELETED &&
3329                     oh->parent_obj_id != YAFFS_OBJECTID_UNLINKED)
3330                         file_size = in->variant.file_variant.file_size;
3331                 yaffs_oh_size_load(oh, file_size);
3332                 break;
3333         case YAFFS_OBJECT_TYPE_HARDLINK:
3334                 oh->equiv_id = in->variant.hardlink_variant.equiv_id;
3335                 break;
3336         case YAFFS_OBJECT_TYPE_SPECIAL:
3337                 /* Do nothing */
3338                 break;
3339         case YAFFS_OBJECT_TYPE_DIRECTORY:
3340                 /* Do nothing */
3341                 break;
3342         case YAFFS_OBJECT_TYPE_SYMLINK:
3343                 alias = in->variant.symlink_variant.alias;
3344                 if (!alias)
3345                         alias = _Y("no alias");
3346                 strncpy(oh->alias, alias, YAFFS_MAX_ALIAS_LENGTH);
3347                 oh->alias[YAFFS_MAX_ALIAS_LENGTH] = 0;
3348                 break;
3349         }
3350
3351         /* process any xattrib modifications */
3352         if (xmod)
3353                 yaffs_apply_xattrib_mod(in, (char *)buffer, xmod);
3354
3355         /* Tags */
3356         memset(&new_tags, 0, sizeof(new_tags));
3357         in->serial++;
3358         new_tags.chunk_id = 0;
3359         new_tags.obj_id = in->obj_id;
3360         new_tags.serial_number = in->serial;
3361
3362         /* Add extra info for file header */
3363         new_tags.extra_available = 1;
3364         new_tags.extra_parent_id = oh->parent_obj_id;
3365         new_tags.extra_file_size = file_size;
3366         new_tags.extra_is_shrink = oh->is_shrink;
3367         new_tags.extra_equiv_id = oh->equiv_id;
3368         new_tags.extra_shadows = (oh->shadows_obj > 0) ? 1 : 0;
3369         new_tags.extra_obj_type = in->variant_type;
3370         yaffs_verify_oh(in, oh, &new_tags, 1);
3371
3372         /* Create new chunk in NAND */
3373         new_chunk_id =
3374             yaffs_write_new_chunk(dev, buffer, &new_tags,
3375                                   (prev_chunk_id > 0) ? 1 : 0);
3376
3377         if (buffer)
3378                 yaffs_release_temp_buffer(dev, buffer);
3379
3380         if (new_chunk_id < 0)
3381                 return new_chunk_id;
3382
3383         in->hdr_chunk = new_chunk_id;
3384
3385         if (prev_chunk_id > 0)
3386                 yaffs_chunk_del(dev, prev_chunk_id, 1, __LINE__);
3387
3388         if (!yaffs_obj_cache_dirty(in))
3389                 in->dirty = 0;
3390
3391         /* If this was a shrink, then mark the block
3392          * that the chunk lives on */
3393         if (is_shrink) {
3394                 bi = yaffs_get_block_info(in->my_dev,
3395                                           new_chunk_id /
3396                                           in->my_dev->param.chunks_per_block);
3397                 bi->has_shrink_hdr = 1;
3398         }
3399
3400
3401         return new_chunk_id;
3402 }
3403
3404 /*--------------------- File read/write ------------------------
3405  * Read and write have very similar structures.
3406  * In general the read/write has three parts to it
3407  * An incomplete chunk to start with (if the read/write is not chunk-aligned)
3408  * Some complete chunks
3409  * An incomplete chunk to end off with
3410  *
3411  * Curve-balls: the first chunk might also be the last chunk.
3412  */
3413
3414 int yaffs_file_rd(struct yaffs_obj *in, u8 * buffer, loff_t offset, int n_bytes)
3415 {
3416         int chunk;
3417         u32 start;
3418         int n_copy;
3419         int n = n_bytes;
3420         int n_done = 0;
3421         struct yaffs_cache *cache;
3422         struct yaffs_dev *dev;
3423
3424         dev = in->my_dev;
3425
3426         while (n > 0) {
3427                 yaffs_addr_to_chunk(dev, offset, &chunk, &start);
3428                 chunk++;
3429
3430                 /* OK now check for the curveball where the start and end are in
3431                  * the same chunk.
3432                  */
3433                 if ((start + n) < dev->data_bytes_per_chunk)
3434                         n_copy = n;
3435                 else
3436                         n_copy = dev->data_bytes_per_chunk - start;
3437
3438                 cache = yaffs_find_chunk_cache(in, chunk);
3439
3440                 /* If the chunk is already in the cache or it is less than
3441                  * a whole chunk or we're using inband tags then use the cache
3442                  * (if there is caching) else bypass the cache.
3443                  */
3444                 if (cache || n_copy != dev->data_bytes_per_chunk ||
3445                     dev->param.inband_tags) {
3446                         if (dev->param.n_caches > 0) {
3447
3448                                 /* If we can't find the data in the cache,
3449                                  * then load it up. */
3450
3451                                 if (!cache) {
3452                                         cache =
3453                                             yaffs_grab_chunk_cache(in->my_dev);
3454                                         cache->object = in;
3455                                         cache->chunk_id = chunk;
3456                                         cache->dirty = 0;
3457                                         cache->locked = 0;
3458                                         yaffs_rd_data_obj(in, chunk,
3459                                                           cache->data);
3460                                         cache->n_bytes = 0;
3461                                 }
3462
3463                                 yaffs_use_cache(dev, cache, 0);
3464
3465                                 cache->locked = 1;
3466
3467                                 memcpy(buffer, &cache->data[start], n_copy);
3468
3469                                 cache->locked = 0;
3470                         } else {
3471                                 /* Read into the local buffer then copy.. */
3472
3473                                 u8 *local_buffer =
3474                                     yaffs_get_temp_buffer(dev);
3475                                 yaffs_rd_data_obj(in, chunk, local_buffer);
3476
3477                                 memcpy(buffer, &local_buffer[start], n_copy);
3478
3479                                 yaffs_release_temp_buffer(dev, local_buffer);
3480                         }
3481                 } else {
3482                         /* A full chunk. Read directly into the buffer. */
3483                         yaffs_rd_data_obj(in, chunk, buffer);
3484                 }
3485                 n -= n_copy;
3486                 offset += n_copy;
3487                 buffer += n_copy;
3488                 n_done += n_copy;
3489         }
3490         return n_done;
3491 }
3492
/*
 * Write n_bytes from buffer into the file at byte position offset.
 * Returns the number of bytes actually written (may be short if a
 * chunk write fails).
 *
 * Partial chunks are merged with existing data via the chunk cache
 * (or a temp buffer when caching is disabled); whole aligned chunks
 * may bypass the cache, subject to param.cache_bypass_aligned and
 * inband tags. If write_through is set, cached data is also pushed
 * to NAND immediately instead of being left dirty in the cache.
 *
 * NOTE(review): hole handling is not done here; callers use
 * yaffs_wr_file() which calls yaffs2_handle_hole() first.
 */
int yaffs_do_file_wr(struct yaffs_obj *in, const u8 *buffer, loff_t offset,
                     int n_bytes, int write_through)
{

        int chunk;
        u32 start;
        int n_copy;
        int n = n_bytes;
        int n_done = 0;
        int n_writeback;
        loff_t start_write = offset;
        int chunk_written = 0;
        u32 n_bytes_read;
        loff_t chunk_start;
        struct yaffs_dev *dev;

        dev = in->my_dev;

        /* Stop as soon as a chunk write reports failure. */
        while (n > 0 && chunk_written >= 0) {
                yaffs_addr_to_chunk(dev, offset, &chunk, &start);

                /* Cross-check the chunk/offset arithmetic; a mismatch
                 * indicates a bug, so trace loudly. */
                if (((loff_t)chunk) *
                    dev->data_bytes_per_chunk + start != offset ||
                    start >= dev->data_bytes_per_chunk) {
                        yaffs_trace(YAFFS_TRACE_ERROR,
                                "AddrToChunk of offset %lld gives chunk %d start %d",
                                offset, chunk, start);
                }
                chunk++;        /* File pos to chunk in file offset */

                /* OK now check for the curveball where the start and end are in
                 * the same chunk.
                 */

                if ((start + n) < dev->data_bytes_per_chunk) {
                        n_copy = n;

                        /* Now calculate how many bytes to write back....
                         * If we're overwriting and not writing to the end of
                         * file then we need to write back as much as was there
                         * before.
                         */

                        chunk_start = (((loff_t)(chunk - 1)) *
                                        dev->data_bytes_per_chunk);

                        if (chunk_start > in->variant.file_variant.file_size)
                                n_bytes_read = 0;       /* Past end of file */
                        else
                                n_bytes_read =
                                    in->variant.file_variant.file_size -
                                    chunk_start;

                        if (n_bytes_read > dev->data_bytes_per_chunk)
                                n_bytes_read = dev->data_bytes_per_chunk;

                        /* Write back the larger of what was there and what
                         * we are writing now. */
                        n_writeback =
                            (n_bytes_read >
                             (start + n)) ? n_bytes_read : (start + n);

                        if (n_writeback < 0 ||
                            n_writeback > dev->data_bytes_per_chunk)
                                BUG();

                } else {
                        n_copy = dev->data_bytes_per_chunk - start;
                        n_writeback = dev->data_bytes_per_chunk;
                }

                if (n_copy != dev->data_bytes_per_chunk ||
                    !dev->param.cache_bypass_aligned ||
                    dev->param.inband_tags) {
                        /* An incomplete start or end chunk (or maybe both
                         * start and end chunk), or we're using inband tags,
                         * or we're forcing writes through the cache,
                         * so we want to use the cache buffers.
                         */
                        if (dev->param.n_caches > 0) {
                                struct yaffs_cache *cache;

                                /* If we can't find the data in the cache, then
                                 * load the cache */
                                cache = yaffs_find_chunk_cache(in, chunk);

                                if (!cache &&
                                    yaffs_check_alloc_available(dev, 1)) {
                                        cache = yaffs_grab_chunk_cache(dev);
                                        cache->object = in;
                                        cache->chunk_id = chunk;
                                        cache->dirty = 0;
                                        cache->locked = 0;
                                        yaffs_rd_data_obj(in, chunk,
                                                          cache->data);
                                } else if (cache &&
                                           !cache->dirty &&
                                           !yaffs_check_alloc_available(dev,
                                                                        1)) {
                                        /* Drop the cache if it was a read cache
                                         * item and no space check has been made
                                         * for it.
                                         */
                                        cache = NULL;
                                }

                                if (cache) {
                                        yaffs_use_cache(dev, cache, 1);
                                        cache->locked = 1;

                                        memcpy(&cache->data[start], buffer,
                                               n_copy);

                                        cache->locked = 0;
                                        cache->n_bytes = n_writeback;

                                        if (write_through) {
                                                chunk_written =
                                                    yaffs_wr_data_obj
                                                    (cache->object,
                                                     cache->chunk_id,
                                                     cache->data,
                                                     cache->n_bytes, 1);
                                                cache->dirty = 0;
                                        }
                                } else {
                                        /* No cache buffer could be committed:
                                         * fail this write. */
                                        chunk_written = -1;     /* fail write */
                                }
                        } else {
                                /* An incomplete start or end chunk (or maybe
                                 * both start and end chunk). Read into the
                                 * local buffer then copy over and write back.
                                 */

                                u8 *local_buffer = yaffs_get_temp_buffer(dev);

                                yaffs_rd_data_obj(in, chunk, local_buffer);
                                memcpy(&local_buffer[start], buffer, n_copy);

                                chunk_written =
                                    yaffs_wr_data_obj(in, chunk,
                                                      local_buffer,
                                                      n_writeback, 0);

                                yaffs_release_temp_buffer(dev, local_buffer);
                        }
                } else {
                        /* A full chunk. Write directly from the buffer. */

                        chunk_written =
                            yaffs_wr_data_obj(in, chunk, buffer,
                                              dev->data_bytes_per_chunk, 0);

                        /* Since we've overwritten the cached data,
                         * we better invalidate it. */
                        yaffs_invalidate_chunk_cache(in, chunk);
                }

                /* Only advance on success; a failure ends the loop with
                 * n_done reflecting the bytes written so far. */
                if (chunk_written >= 0) {
                        n -= n_copy;
                        offset += n_copy;
                        buffer += n_copy;
                        n_done += n_copy;
                }
        }

        /* Update file object: extend the recorded size if we wrote
         * past the old end of file. */

        if ((start_write + n_done) > in->variant.file_variant.file_size)
                in->variant.file_variant.file_size = (start_write + n_done);

        in->dirty = 1;
        return n_done;
}
3665
3666 int yaffs_wr_file(struct yaffs_obj *in, const u8 *buffer, loff_t offset,
3667                   int n_bytes, int write_through)
3668 {
3669         yaffs2_handle_hole(in, offset);
3670         return yaffs_do_file_wr(in, buffer, offset, n_bytes, write_through);
3671 }
3672
3673 /* ---------------------- File resizing stuff ------------------ */
3674
3675 static void yaffs_prune_chunks(struct yaffs_obj *in, loff_t new_size)
3676 {
3677
3678         struct yaffs_dev *dev = in->my_dev;
3679         loff_t old_size = in->variant.file_variant.file_size;
3680         int i;
3681         int chunk_id;
3682         u32 dummy;
3683         int last_del;
3684         int start_del;
3685
3686         if (old_size > 0)
3687                 yaffs_addr_to_chunk(dev, old_size - 1, &last_del, &dummy);
3688         else
3689                 last_del = 0;
3690
3691         yaffs_addr_to_chunk(dev, new_size + dev->data_bytes_per_chunk - 1,
3692                                 &start_del, &dummy);
3693         last_del++;
3694         start_del++;
3695
3696         /* Delete backwards so that we don't end up with holes if
3697          * power is lost part-way through the operation.
3698          */
3699         for (i = last_del; i >= start_del; i--) {
3700                 /* NB this could be optimised somewhat,
3701                  * eg. could retrieve the tags and write them without
3702                  * using yaffs_chunk_del
3703                  */
3704
3705                 chunk_id = yaffs_find_del_file_chunk(in, i, NULL);
3706
3707                 if (chunk_id < 1)
3708                         continue;
3709
3710                 if (chunk_id <
3711                     (dev->internal_start_block * dev->param.chunks_per_block) ||
3712                     chunk_id >=
3713                     ((dev->internal_end_block + 1) *
3714                       dev->param.chunks_per_block)) {
3715                         yaffs_trace(YAFFS_TRACE_ALWAYS,
3716                                 "Found daft chunk_id %d for %d",
3717                                 chunk_id, i);
3718                 } else {
3719                         in->n_data_chunks--;
3720                         yaffs_chunk_del(dev, chunk_id, 1, __LINE__);
3721                 }
3722         }
3723 }
3724
3725 void yaffs_resize_file_down(struct yaffs_obj *obj, loff_t new_size)
3726 {
3727         int new_full;
3728         u32 new_partial;
3729         struct yaffs_dev *dev = obj->my_dev;
3730
3731         yaffs_addr_to_chunk(dev, new_size, &new_full, &new_partial);
3732
3733         yaffs_prune_chunks(obj, new_size);
3734
3735         if (new_partial != 0) {
3736                 int last_chunk = 1 + new_full;
3737                 u8 *local_buffer = yaffs_get_temp_buffer(dev);
3738
3739                 /* Rewrite the last chunk with its new size and zero pad */
3740                 yaffs_rd_data_obj(obj, last_chunk, local_buffer);
3741                 memset(local_buffer + new_partial, 0,
3742                        dev->data_bytes_per_chunk - new_partial);
3743
3744                 yaffs_wr_data_obj(obj, last_chunk, local_buffer,
3745                                   new_partial, 1);
3746
3747                 yaffs_release_temp_buffer(dev, local_buffer);
3748         }
3749
3750         obj->variant.file_variant.file_size = new_size;
3751
3752         yaffs_prune_tree(dev, &obj->variant.file_variant);
3753 }
3754
3755 int yaffs_resize_file(struct yaffs_obj *in, loff_t new_size)
3756 {
3757         struct yaffs_dev *dev = in->my_dev;
3758         loff_t old_size = in->variant.file_variant.file_size;
3759
3760         yaffs_flush_file_cache(in);
3761         yaffs_invalidate_whole_cache(in);
3762
3763         yaffs_check_gc(dev, 0);
3764
3765         if (in->variant_type != YAFFS_OBJECT_TYPE_FILE)
3766                 return YAFFS_FAIL;
3767
3768         if (new_size == old_size)
3769                 return YAFFS_OK;
3770
3771         if (new_size > old_size) {
3772                 yaffs2_handle_hole(in, new_size);
3773                 in->variant.file_variant.file_size = new_size;
3774         } else {
3775                 /* new_size < old_size */
3776                 yaffs_resize_file_down(in, new_size);
3777         }
3778
3779         /* Write a new object header to reflect the resize.
3780          * show we've shrunk the file, if need be
3781          * Do this only if the file is not in the deleted directories
3782          * and is not shadowed.
3783          */
3784         if (in->parent &&
3785             !in->is_shadowed &&
3786             in->parent->obj_id != YAFFS_OBJECTID_UNLINKED &&
3787             in->parent->obj_id != YAFFS_OBJECTID_DELETED)
3788                 yaffs_update_oh(in, NULL, 0, 0, 0, NULL);
3789
3790         return YAFFS_OK;
3791 }
3792
3793 int yaffs_flush_file(struct yaffs_obj *in, int update_time, int data_sync)
3794 {
3795         if (!in->dirty)
3796                 return YAFFS_OK;
3797
3798         yaffs_flush_file_cache(in);
3799
3800         if (data_sync)
3801                 return YAFFS_OK;
3802
3803         if (update_time)
3804                 yaffs_load_current_time(in, 0, 0);
3805
3806         return (yaffs_update_oh(in, NULL, 0, 0, 0, NULL) >= 0) ?
3807                                 YAFFS_OK : YAFFS_FAIL;
3808 }
3809
3810
3811 /* yaffs_del_file deletes the whole file data
3812  * and the inode associated with the file.
3813  * It does not delete the links associated with the file.
3814  */
3815 static int yaffs_unlink_file_if_needed(struct yaffs_obj *in)
3816 {
3817         int ret_val;
3818         int del_now = 0;
3819         struct yaffs_dev *dev = in->my_dev;
3820
3821         if (!in->my_inode)
3822                 del_now = 1;
3823
3824         if (del_now) {
3825                 ret_val =
3826                     yaffs_change_obj_name(in, in->my_dev->del_dir,
3827                                           _Y("deleted"), 0, 0);
3828                 yaffs_trace(YAFFS_TRACE_TRACING,
3829                         "yaffs: immediate deletion of file %d",
3830                         in->obj_id);
3831                 in->deleted = 1;
3832                 in->my_dev->n_deleted_files++;
3833                 if (dev->param.disable_soft_del || dev->param.is_yaffs2)
3834                         yaffs_resize_file(in, 0);
3835                 yaffs_soft_del_file(in);
3836         } else {
3837                 ret_val =
3838                     yaffs_change_obj_name(in, in->my_dev->unlinked_dir,
3839                                           _Y("unlinked"), 0, 0);
3840         }
3841         return ret_val;
3842 }
3843
3844 static int yaffs_del_file(struct yaffs_obj *in)
3845 {
3846         int ret_val = YAFFS_OK;
3847         int deleted;    /* Need to cache value on stack if in is freed */
3848         struct yaffs_dev *dev = in->my_dev;
3849
3850         if (dev->param.disable_soft_del || dev->param.is_yaffs2)
3851                 yaffs_resize_file(in, 0);
3852
3853         if (in->n_data_chunks > 0) {
3854                 /* Use soft deletion if there is data in the file.
3855                  * That won't be the case if it has been resized to zero.
3856                  */
3857                 if (!in->unlinked)
3858                         ret_val = yaffs_unlink_file_if_needed(in);
3859
3860                 deleted = in->deleted;
3861
3862                 if (ret_val == YAFFS_OK && in->unlinked && !in->deleted) {
3863                         in->deleted = 1;
3864                         deleted = 1;
3865                         in->my_dev->n_deleted_files++;
3866                         yaffs_soft_del_file(in);
3867                 }
3868                 return deleted ? YAFFS_OK : YAFFS_FAIL;
3869         } else {
3870                 /* The file has no data chunks so we toss it immediately */
3871                 yaffs_free_tnode(in->my_dev, in->variant.file_variant.top);
3872                 in->variant.file_variant.top = NULL;
3873                 yaffs_generic_obj_del(in);
3874
3875                 return YAFFS_OK;
3876         }
3877 }
3878
3879 int yaffs_is_non_empty_dir(struct yaffs_obj *obj)
3880 {
3881         return (obj &&
3882                 obj->variant_type == YAFFS_OBJECT_TYPE_DIRECTORY) &&
3883                 !(list_empty(&obj->variant.dir_variant.children));
3884 }
3885
3886 static int yaffs_del_dir(struct yaffs_obj *obj)
3887 {
3888         /* First check that the directory is empty. */
3889         if (yaffs_is_non_empty_dir(obj))
3890                 return YAFFS_FAIL;
3891
3892         return yaffs_generic_obj_del(obj);
3893 }
3894
3895 static int yaffs_del_symlink(struct yaffs_obj *in)
3896 {
3897         kfree(in->variant.symlink_variant.alias);
3898         in->variant.symlink_variant.alias = NULL;
3899
3900         return yaffs_generic_obj_del(in);
3901 }
3902
3903 static int yaffs_del_link(struct yaffs_obj *in)
3904 {
3905         /* remove this hardlink from the list associated with the equivalent
3906          * object
3907          */
3908         list_del_init(&in->hard_links);
3909         return yaffs_generic_obj_del(in);
3910 }
3911
3912 int yaffs_del_obj(struct yaffs_obj *obj)
3913 {
3914         int ret_val = -1;
3915
3916         switch (obj->variant_type) {
3917         case YAFFS_OBJECT_TYPE_FILE:
3918                 ret_val = yaffs_del_file(obj);
3919                 break;
3920         case YAFFS_OBJECT_TYPE_DIRECTORY:
3921                 if (!list_empty(&obj->variant.dir_variant.dirty)) {
3922                         yaffs_trace(YAFFS_TRACE_BACKGROUND,
3923                                 "Remove object %d from dirty directories",
3924                                 obj->obj_id);
3925                         list_del_init(&obj->variant.dir_variant.dirty);
3926                 }
3927                 return yaffs_del_dir(obj);
3928                 break;
3929         case YAFFS_OBJECT_TYPE_SYMLINK:
3930                 ret_val = yaffs_del_symlink(obj);
3931                 break;
3932         case YAFFS_OBJECT_TYPE_HARDLINK: