Seperate drivers and tags handlers into their own interface structures
[yaffs2.git] / yaffs_guts.c
1 /*
2  * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
3  *
4  * Copyright (C) 2002-2011 Aleph One Ltd.
5  *   for Toby Churchill Ltd and Brightstar Engineering
6  *
7  * Created by Charles Manning <charles@aleph1.co.uk>
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License version 2 as
11  * published by the Free Software Foundation.
12  */
13
14 #include "yportenv.h"
15 #include "yaffs_trace.h"
16
17 #include "yaffs_guts.h"
18 #include "yaffs_getblockinfo.h"
19 #include "yaffs_tagscompat.h"
20 #include "yaffs_tagsmarshall.h"
21 #include "yaffs_nand.h"
22 #include "yaffs_yaffs1.h"
23 #include "yaffs_yaffs2.h"
24 #include "yaffs_bitmap.h"
25 #include "yaffs_verify.h"
26 #include "yaffs_nand.h"
27 #include "yaffs_packedtags2.h"
28 #include "yaffs_nameval.h"
29 #include "yaffs_allocator.h"
30 #include "yaffs_attribs.h"
31 #include "yaffs_summary.h"
32
33 /* Note YAFFS_GC_GOOD_ENOUGH must be <= YAFFS_GC_PASSIVE_THRESHOLD */
34 #define YAFFS_GC_GOOD_ENOUGH 2
35 #define YAFFS_GC_PASSIVE_THRESHOLD 4
36
37 #include "yaffs_ecc.h"
38
39 /* Forward declarations */
40
41 static int yaffs_wr_data_obj(struct yaffs_obj *in, int inode_chunk,
42                              const u8 *buffer, int n_bytes, int use_reserve);
43
44
45
46 /* Function to calculate chunk and offset */
47
48 void yaffs_addr_to_chunk(struct yaffs_dev *dev, loff_t addr,
49                                 int *chunk_out, u32 *offset_out)
50 {
51         int chunk;
52         u32 offset;
53
54         chunk = (u32) (addr >> dev->chunk_shift);
55
56         if (dev->chunk_div == 1) {
57                 /* easy power of 2 case */
58                 offset = (u32) (addr & dev->chunk_mask);
59         } else {
60                 /* Non power-of-2 case */
61
62                 loff_t chunk_base;
63
64                 chunk /= dev->chunk_div;
65
66                 chunk_base = ((loff_t) chunk) * dev->data_bytes_per_chunk;
67                 offset = (u32) (addr - chunk_base);
68         }
69
70         *chunk_out = chunk;
71         *offset_out = offset;
72 }
73
74 /* Function to return the number of shifts for a power of 2 greater than or
75  * equal to the given number
76  * Note we don't try to cater for all possible numbers and this does not have to
77  * be hellishly efficient.
78  */
79
80 static inline u32 calc_shifts_ceiling(u32 x)
81 {
82         int extra_bits;
83         int shifts;
84
85         shifts = extra_bits = 0;
86
87         while (x > 1) {
88                 if (x & 1)
89                         extra_bits++;
90                 x >>= 1;
91                 shifts++;
92         }
93
94         if (extra_bits)
95                 shifts++;
96
97         return shifts;
98 }
99
100 /* Function to return the number of shifts to get a 1 in bit 0
101  */
102
103 static inline u32 calc_shifts(u32 x)
104 {
105         u32 shifts;
106
107         shifts = 0;
108
109         if (!x)
110                 return 0;
111
112         while (!(x & 1)) {
113                 x >>= 1;
114                 shifts++;
115         }
116
117         return shifts;
118 }
119
120 /*
121  * Temporary buffer manipulations.
122  */
123
124 static int yaffs_init_tmp_buffers(struct yaffs_dev *dev)
125 {
126         int i;
127         u8 *buf = (u8 *) 1;
128
129         memset(dev->temp_buffer, 0, sizeof(dev->temp_buffer));
130
131         for (i = 0; buf && i < YAFFS_N_TEMP_BUFFERS; i++) {
132                 dev->temp_buffer[i].in_use = 0;
133                 buf = kmalloc(dev->param.total_bytes_per_chunk, GFP_NOFS);
134                 dev->temp_buffer[i].buffer = buf;
135         }
136
137         return buf ? YAFFS_OK : YAFFS_FAIL;
138 }
139
140 u8 *yaffs_get_temp_buffer(struct yaffs_dev * dev)
141 {
142         int i;
143
144         dev->temp_in_use++;
145         if (dev->temp_in_use > dev->max_temp)
146                 dev->max_temp = dev->temp_in_use;
147
148         for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
149                 if (dev->temp_buffer[i].in_use == 0) {
150                         dev->temp_buffer[i].in_use = 1;
151                         return dev->temp_buffer[i].buffer;
152                 }
153         }
154
155         yaffs_trace(YAFFS_TRACE_BUFFERS, "Out of temp buffers");
156         /*
157          * If we got here then we have to allocate an unmanaged one
158          * This is not good.
159          */
160
161         dev->unmanaged_buffer_allocs++;
162         return kmalloc(dev->data_bytes_per_chunk, GFP_NOFS);
163
164 }
165
166 void yaffs_release_temp_buffer(struct yaffs_dev *dev, u8 *buffer)
167 {
168         int i;
169
170         dev->temp_in_use--;
171
172         for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
173                 if (dev->temp_buffer[i].buffer == buffer) {
174                         dev->temp_buffer[i].in_use = 0;
175                         return;
176                 }
177         }
178
179         if (buffer) {
180                 /* assume it is an unmanaged one. */
181                 yaffs_trace(YAFFS_TRACE_BUFFERS,
182                         "Releasing unmanaged temp buffer");
183                 kfree(buffer);
184                 dev->unmanaged_buffer_deallocs++;
185         }
186
187 }
188
189 /*
190  * Functions for robustisizing TODO
191  *
192  */
193
194 static void yaffs_handle_chunk_wr_ok(struct yaffs_dev *dev, int nand_chunk,
195                                      const u8 *data,
196                                      const struct yaffs_ext_tags *tags)
197 {
198         (void) dev;
199         (void) nand_chunk;
200         (void) data;
201         (void) tags;
202 }
203
204 static void yaffs_handle_chunk_update(struct yaffs_dev *dev, int nand_chunk,
205                                       const struct yaffs_ext_tags *tags)
206 {
207         (void) dev;
208         (void) nand_chunk;
209         (void) tags;
210 }
211
212 void yaffs_handle_chunk_error(struct yaffs_dev *dev,
213                               struct yaffs_block_info *bi)
214 {
215         if (!bi->gc_prioritise) {
216                 bi->gc_prioritise = 1;
217                 dev->has_pending_prioritised_gc = 1;
218                 bi->chunk_error_strikes++;
219
220                 if (bi->chunk_error_strikes > 3) {
221                         bi->needs_retiring = 1; /* Too many stikes, so retire */
222                         yaffs_trace(YAFFS_TRACE_ALWAYS,
223                                 "yaffs: Block struck out");
224
225                 }
226         }
227 }
228
229 static void yaffs_handle_chunk_wr_error(struct yaffs_dev *dev, int nand_chunk,
230                                         int erased_ok)
231 {
232         int flash_block = nand_chunk / dev->param.chunks_per_block;
233         struct yaffs_block_info *bi = yaffs_get_block_info(dev, flash_block);
234
235         yaffs_handle_chunk_error(dev, bi);
236
237         if (erased_ok) {
238                 /* Was an actual write failure,
239                  * so mark the block for retirement.*/
240                 bi->needs_retiring = 1;
241                 yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
242                   "**>> Block %d needs retiring", flash_block);
243         }
244
245         /* Delete the chunk */
246         yaffs_chunk_del(dev, nand_chunk, 1, __LINE__);
247         yaffs_skip_rest_of_block(dev);
248 }
249
250 /*
251  * Verification code
252  */
253
254 /*
255  *  Simple hash function. Needs to have a reasonable spread
256  */
257
258 static inline int yaffs_hash_fn(int n)
259 {
260         if (n < 0)
261                 n = -n;
262         return n % YAFFS_NOBJECT_BUCKETS;
263 }
264
265 /*
266  * Access functions to useful fake objects.
267  * Note that root might have a presence in NAND if permissions are set.
268  */
269
270 struct yaffs_obj *yaffs_root(struct yaffs_dev *dev)
271 {
272         return dev->root_dir;
273 }
274
275 struct yaffs_obj *yaffs_lost_n_found(struct yaffs_dev *dev)
276 {
277         return dev->lost_n_found;
278 }
279
280 /*
281  *  Erased NAND checking functions
282  */
283
284 int yaffs_check_ff(u8 *buffer, int n_bytes)
285 {
286         /* Horrible, slow implementation */
287         while (n_bytes--) {
288                 if (*buffer != 0xff)
289                         return 0;
290                 buffer++;
291         }
292         return 1;
293 }
294
295 static int yaffs_check_chunk_erased(struct yaffs_dev *dev, int nand_chunk)
296 {
297         int retval = YAFFS_OK;
298         u8 *data = yaffs_get_temp_buffer(dev);
299         struct yaffs_ext_tags tags;
300         int result;
301
302         result = yaffs_rd_chunk_tags_nand(dev, nand_chunk, data, &tags);
303
304         if (tags.ecc_result > YAFFS_ECC_RESULT_NO_ERROR)
305                 retval = YAFFS_FAIL;
306
307         if (!yaffs_check_ff(data, dev->data_bytes_per_chunk) ||
308                 tags.chunk_used) {
309                 yaffs_trace(YAFFS_TRACE_NANDACCESS,
310                         "Chunk %d not erased", nand_chunk);
311                 retval = YAFFS_FAIL;
312         }
313
314         yaffs_release_temp_buffer(dev, data);
315
316         return retval;
317
318 }
319
320 static int yaffs_verify_chunk_written(struct yaffs_dev *dev,
321                                       int nand_chunk,
322                                       const u8 *data,
323                                       struct yaffs_ext_tags *tags)
324 {
325         int retval = YAFFS_OK;
326         struct yaffs_ext_tags temp_tags;
327         u8 *buffer = yaffs_get_temp_buffer(dev);
328         int result;
329
330         result = yaffs_rd_chunk_tags_nand(dev, nand_chunk, buffer, &temp_tags);
331         if (memcmp(buffer, data, dev->data_bytes_per_chunk) ||
332             temp_tags.obj_id != tags->obj_id ||
333             temp_tags.chunk_id != tags->chunk_id ||
334             temp_tags.n_bytes != tags->n_bytes)
335                 retval = YAFFS_FAIL;
336
337         yaffs_release_temp_buffer(dev, buffer);
338
339         return retval;
340 }
341
342
343 int yaffs_check_alloc_available(struct yaffs_dev *dev, int n_chunks)
344 {
345         int reserved_chunks;
346         int reserved_blocks = dev->param.n_reserved_blocks;
347         int checkpt_blocks;
348
349         checkpt_blocks = yaffs_calc_checkpt_blocks_required(dev);
350
351         reserved_chunks =
352             (reserved_blocks + checkpt_blocks) * dev->param.chunks_per_block;
353
354         return (dev->n_free_chunks > (reserved_chunks + n_chunks));
355 }
356
357 static int yaffs_find_alloc_block(struct yaffs_dev *dev)
358 {
359         int i;
360         struct yaffs_block_info *bi;
361
362         if (dev->n_erased_blocks < 1) {
363                 /* Hoosterman we've got a problem.
364                  * Can't get space to gc
365                  */
366                 yaffs_trace(YAFFS_TRACE_ERROR,
367                   "yaffs tragedy: no more erased blocks");
368
369                 return -1;
370         }
371
372         /* Find an empty block. */
373
374         for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
375                 dev->alloc_block_finder++;
376                 if (dev->alloc_block_finder < dev->internal_start_block
377                     || dev->alloc_block_finder > dev->internal_end_block) {
378                         dev->alloc_block_finder = dev->internal_start_block;
379                 }
380
381                 bi = yaffs_get_block_info(dev, dev->alloc_block_finder);
382
383                 if (bi->block_state == YAFFS_BLOCK_STATE_EMPTY) {
384                         bi->block_state = YAFFS_BLOCK_STATE_ALLOCATING;
385                         dev->seq_number++;
386                         bi->seq_number = dev->seq_number;
387                         dev->n_erased_blocks--;
388                         yaffs_trace(YAFFS_TRACE_ALLOCATE,
389                           "Allocated block %d, seq  %d, %d left" ,
390                            dev->alloc_block_finder, dev->seq_number,
391                            dev->n_erased_blocks);
392                         return dev->alloc_block_finder;
393                 }
394         }
395
396         yaffs_trace(YAFFS_TRACE_ALWAYS,
397                 "yaffs tragedy: no more erased blocks, but there should have been %d",
398                 dev->n_erased_blocks);
399
400         return -1;
401 }
402
403 static int yaffs_alloc_chunk(struct yaffs_dev *dev, int use_reserver,
404                              struct yaffs_block_info **block_ptr)
405 {
406         int ret_val;
407         struct yaffs_block_info *bi;
408
409         if (dev->alloc_block < 0) {
410                 /* Get next block to allocate off */
411                 dev->alloc_block = yaffs_find_alloc_block(dev);
412                 dev->alloc_page = 0;
413         }
414
415         if (!use_reserver && !yaffs_check_alloc_available(dev, 1)) {
416                 /* No space unless we're allowed to use the reserve. */
417                 return -1;
418         }
419
420         if (dev->n_erased_blocks < dev->param.n_reserved_blocks
421             && dev->alloc_page == 0)
422                 yaffs_trace(YAFFS_TRACE_ALLOCATE, "Allocating reserve");
423
424         /* Next page please.... */
425         if (dev->alloc_block >= 0) {
426                 bi = yaffs_get_block_info(dev, dev->alloc_block);
427
428                 ret_val = (dev->alloc_block * dev->param.chunks_per_block) +
429                     dev->alloc_page;
430                 bi->pages_in_use++;
431                 yaffs_set_chunk_bit(dev, dev->alloc_block, dev->alloc_page);
432
433                 dev->alloc_page++;
434
435                 dev->n_free_chunks--;
436
437                 /* If the block is full set the state to full */
438                 if (dev->alloc_page >= dev->param.chunks_per_block) {
439                         bi->block_state = YAFFS_BLOCK_STATE_FULL;
440                         dev->alloc_block = -1;
441                 }
442
443                 if (block_ptr)
444                         *block_ptr = bi;
445
446                 return ret_val;
447         }
448
449         yaffs_trace(YAFFS_TRACE_ERROR,
450                 "!!!!!!!!! Allocator out !!!!!!!!!!!!!!!!!");
451
452         return -1;
453 }
454
455 static int yaffs_get_erased_chunks(struct yaffs_dev *dev)
456 {
457         int n;
458
459         n = dev->n_erased_blocks * dev->param.chunks_per_block;
460
461         if (dev->alloc_block > 0)
462                 n += (dev->param.chunks_per_block - dev->alloc_page);
463
464         return n;
465
466 }
467
468 /*
469  * yaffs_skip_rest_of_block() skips over the rest of the allocation block
470  * if we don't want to write to it.
471  */
472 void yaffs_skip_rest_of_block(struct yaffs_dev *dev)
473 {
474         struct yaffs_block_info *bi;
475
476         if (dev->alloc_block > 0) {
477                 bi = yaffs_get_block_info(dev, dev->alloc_block);
478                 if (bi->block_state == YAFFS_BLOCK_STATE_ALLOCATING) {
479                         bi->block_state = YAFFS_BLOCK_STATE_FULL;
480                         dev->alloc_block = -1;
481                 }
482         }
483 }
484
485 static int yaffs_write_new_chunk(struct yaffs_dev *dev,
486                                  const u8 *data,
487                                  struct yaffs_ext_tags *tags, int use_reserver)
488 {
489         int attempts = 0;
490         int write_ok = 0;
491         int chunk;
492
493         yaffs2_checkpt_invalidate(dev);
494
495         do {
496                 struct yaffs_block_info *bi = 0;
497                 int erased_ok = 0;
498
499                 chunk = yaffs_alloc_chunk(dev, use_reserver, &bi);
500                 if (chunk < 0) {
501                         /* no space */
502                         break;
503                 }
504
505                 /* First check this chunk is erased, if it needs
506                  * checking.  The checking policy (unless forced
507                  * always on) is as follows:
508                  *
509                  * Check the first page we try to write in a block.
510                  * If the check passes then we don't need to check any
511                  * more.        If the check fails, we check again...
512                  * If the block has been erased, we don't need to check.
513                  *
514                  * However, if the block has been prioritised for gc,
515                  * then we think there might be something odd about
516                  * this block and stop using it.
517                  *
518                  * Rationale: We should only ever see chunks that have
519                  * not been erased if there was a partially written
520                  * chunk due to power loss.  This checking policy should
521                  * catch that case with very few checks and thus save a
522                  * lot of checks that are most likely not needed.
523                  *
524                  * Mods to the above
525                  * If an erase check fails or the write fails we skip the
526                  * rest of the block.
527                  */
528
529                 /* let's give it a try */
530                 attempts++;
531
532                 if (dev->param.always_check_erased)
533                         bi->skip_erased_check = 0;
534
535                 if (!bi->skip_erased_check) {
536                         erased_ok = yaffs_check_chunk_erased(dev, chunk);
537                         if (erased_ok != YAFFS_OK) {
538                                 yaffs_trace(YAFFS_TRACE_ERROR,
539                                   "**>> yaffs chunk %d was not erased",
540                                   chunk);
541
542                                 /* If not erased, delete this one,
543                                  * skip rest of block and
544                                  * try another chunk */
545                                 yaffs_chunk_del(dev, chunk, 1, __LINE__);
546                                 yaffs_skip_rest_of_block(dev);
547                                 continue;
548                         }
549                 }
550
551                 write_ok = yaffs_wr_chunk_tags_nand(dev, chunk, data, tags);
552
553                 if (!bi->skip_erased_check)
554                         write_ok =
555                             yaffs_verify_chunk_written(dev, chunk, data, tags);
556
557                 if (write_ok != YAFFS_OK) {
558                         /* Clean up aborted write, skip to next block and
559                          * try another chunk */
560                         yaffs_handle_chunk_wr_error(dev, chunk, erased_ok);
561                         continue;
562                 }
563
564                 bi->skip_erased_check = 1;
565
566                 /* Copy the data into the robustification buffer */
567                 yaffs_handle_chunk_wr_ok(dev, chunk, data, tags);
568
569         } while (write_ok != YAFFS_OK &&
570                  (yaffs_wr_attempts <= 0 || attempts <= yaffs_wr_attempts));
571
572         if (!write_ok)
573                 chunk = -1;
574
575         if (attempts > 1) {
576                 yaffs_trace(YAFFS_TRACE_ERROR,
577                         "**>> yaffs write required %d attempts",
578                         attempts);
579                 dev->n_retried_writes += (attempts - 1);
580         }
581
582         return chunk;
583 }
584
585 /*
586  * Block retiring for handling a broken block.
587  */
588
589 static void yaffs_retire_block(struct yaffs_dev *dev, int flash_block)
590 {
591         struct yaffs_block_info *bi = yaffs_get_block_info(dev, flash_block);
592
593         yaffs2_checkpt_invalidate(dev);
594
595         yaffs2_clear_oldest_dirty_seq(dev, bi);
596
597         if (yaffs_mark_bad(dev, flash_block) != YAFFS_OK) {
598                 if (yaffs_erase_block(dev, flash_block) != YAFFS_OK) {
599                         yaffs_trace(YAFFS_TRACE_ALWAYS,
600                                 "yaffs: Failed to mark bad and erase block %d",
601                                 flash_block);
602                 } else {
603                         struct yaffs_ext_tags tags;
604                         int chunk_id =
605                             flash_block * dev->param.chunks_per_block;
606
607                         u8 *buffer = yaffs_get_temp_buffer(dev);
608
609                         memset(buffer, 0xff, dev->data_bytes_per_chunk);
610                         memset(&tags, 0, sizeof(tags));
611                         tags.seq_number = YAFFS_SEQUENCE_BAD_BLOCK;
612                         if (dev->th.write_chunk_tags_fn(dev, chunk_id -
613                                                         dev->chunk_offset,
614                                                         buffer,
615                                                         &tags) != YAFFS_OK)
616                                 yaffs_trace(YAFFS_TRACE_ALWAYS,
617                                         "yaffs: Failed to write bad block marker to block %d",
618                                         flash_block);
619
620                         yaffs_release_temp_buffer(dev, buffer);
621                 }
622         }
623
624         bi->block_state = YAFFS_BLOCK_STATE_DEAD;
625         bi->gc_prioritise = 0;
626         bi->needs_retiring = 0;
627
628         dev->n_retired_blocks++;
629 }
630
631 /*---------------- Name handling functions ------------*/
632
633 static u16 yaffs_calc_name_sum(const YCHAR *name)
634 {
635         u16 sum = 0;
636         u16 i = 1;
637
638         if (!name)
639                 return 0;
640
641         while ((*name) && i < (YAFFS_MAX_NAME_LENGTH / 2)) {
642
643                 /* 0x1f mask is case insensitive */
644                 sum += ((*name) & 0x1f) * i;
645                 i++;
646                 name++;
647         }
648         return sum;
649 }
650
651 void yaffs_set_obj_name(struct yaffs_obj *obj, const YCHAR * name)
652 {
653         memset(obj->short_name, 0, sizeof(obj->short_name));
654         if (name &&
655                 strnlen(name, YAFFS_SHORT_NAME_LENGTH + 1) <=
656                 YAFFS_SHORT_NAME_LENGTH)
657                 strcpy(obj->short_name, name);
658         else
659                 obj->short_name[0] = _Y('\0');
660         obj->sum = yaffs_calc_name_sum(name);
661 }
662
663 void yaffs_set_obj_name_from_oh(struct yaffs_obj *obj,
664                                 const struct yaffs_obj_hdr *oh)
665 {
666 #ifdef CONFIG_YAFFS_AUTO_UNICODE
667         YCHAR tmp_name[YAFFS_MAX_NAME_LENGTH + 1];
668         memset(tmp_name, 0, sizeof(tmp_name));
669         yaffs_load_name_from_oh(obj->my_dev, tmp_name, oh->name,
670                                 YAFFS_MAX_NAME_LENGTH + 1);
671         yaffs_set_obj_name(obj, tmp_name);
672 #else
673         yaffs_set_obj_name(obj, oh->name);
674 #endif
675 }
676
677 loff_t yaffs_max_file_size(struct yaffs_dev *dev)
678 {
679         if(sizeof(loff_t) < 8)
680                 return YAFFS_MAX_FILE_SIZE_32;
681         else
682                 return ((loff_t) YAFFS_MAX_CHUNK_ID) * dev->data_bytes_per_chunk;
683 }
684
685 /*-------------------- TNODES -------------------
686
687  * List of spare tnodes
688  * The list is hooked together using the first pointer
689  * in the tnode.
690  */
691
692 struct yaffs_tnode *yaffs_get_tnode(struct yaffs_dev *dev)
693 {
694         struct yaffs_tnode *tn = yaffs_alloc_raw_tnode(dev);
695
696         if (tn) {
697                 memset(tn, 0, dev->tnode_size);
698                 dev->n_tnodes++;
699         }
700
701         dev->checkpoint_blocks_required = 0;    /* force recalculation */
702
703         return tn;
704 }
705
706 /* FreeTnode frees up a tnode and puts it back on the free list */
707 static void yaffs_free_tnode(struct yaffs_dev *dev, struct yaffs_tnode *tn)
708 {
709         yaffs_free_raw_tnode(dev, tn);
710         dev->n_tnodes--;
711         dev->checkpoint_blocks_required = 0;    /* force recalculation */
712 }
713
714 static void yaffs_deinit_tnodes_and_objs(struct yaffs_dev *dev)
715 {
716         yaffs_deinit_raw_tnodes_and_objs(dev);
717         dev->n_obj = 0;
718         dev->n_tnodes = 0;
719 }
720
721 static void yaffs_load_tnode_0(struct yaffs_dev *dev, struct yaffs_tnode *tn,
722                         unsigned pos, unsigned val)
723 {
724         u32 *map = (u32 *) tn;
725         u32 bit_in_map;
726         u32 bit_in_word;
727         u32 word_in_map;
728         u32 mask;
729
730         pos &= YAFFS_TNODES_LEVEL0_MASK;
731         val >>= dev->chunk_grp_bits;
732
733         bit_in_map = pos * dev->tnode_width;
734         word_in_map = bit_in_map / 32;
735         bit_in_word = bit_in_map & (32 - 1);
736
737         mask = dev->tnode_mask << bit_in_word;
738
739         map[word_in_map] &= ~mask;
740         map[word_in_map] |= (mask & (val << bit_in_word));
741
742         if (dev->tnode_width > (32 - bit_in_word)) {
743                 bit_in_word = (32 - bit_in_word);
744                 word_in_map++;
745                 mask =
746                     dev->tnode_mask >> bit_in_word;
747                 map[word_in_map] &= ~mask;
748                 map[word_in_map] |= (mask & (val >> bit_in_word));
749         }
750 }
751
752 u32 yaffs_get_group_base(struct yaffs_dev *dev, struct yaffs_tnode *tn,
753                          unsigned pos)
754 {
755         u32 *map = (u32 *) tn;
756         u32 bit_in_map;
757         u32 bit_in_word;
758         u32 word_in_map;
759         u32 val;
760
761         pos &= YAFFS_TNODES_LEVEL0_MASK;
762
763         bit_in_map = pos * dev->tnode_width;
764         word_in_map = bit_in_map / 32;
765         bit_in_word = bit_in_map & (32 - 1);
766
767         val = map[word_in_map] >> bit_in_word;
768
769         if (dev->tnode_width > (32 - bit_in_word)) {
770                 bit_in_word = (32 - bit_in_word);
771                 word_in_map++;
772                 val |= (map[word_in_map] << bit_in_word);
773         }
774
775         val &= dev->tnode_mask;
776         val <<= dev->chunk_grp_bits;
777
778         return val;
779 }
780
781 /* ------------------- End of individual tnode manipulation -----------------*/
782
783 /* ---------Functions to manipulate the look-up tree (made up of tnodes) ------
784  * The look up tree is represented by the top tnode and the number of top_level
785  * in the tree. 0 means only the level 0 tnode is in the tree.
786  */
787
788 /* FindLevel0Tnode finds the level 0 tnode, if one exists. */
789 struct yaffs_tnode *yaffs_find_tnode_0(struct yaffs_dev *dev,
790                                        struct yaffs_file_var *file_struct,
791                                        u32 chunk_id)
792 {
793         struct yaffs_tnode *tn = file_struct->top;
794         u32 i;
795         int required_depth;
796         int level = file_struct->top_level;
797
798         (void) dev;
799
800         /* Check sane level and chunk Id */
801         if (level < 0 || level > YAFFS_TNODES_MAX_LEVEL)
802                 return NULL;
803
804         if (chunk_id > YAFFS_MAX_CHUNK_ID)
805                 return NULL;
806
807         /* First check we're tall enough (ie enough top_level) */
808
809         i = chunk_id >> YAFFS_TNODES_LEVEL0_BITS;
810         required_depth = 0;
811         while (i) {
812                 i >>= YAFFS_TNODES_INTERNAL_BITS;
813                 required_depth++;
814         }
815
816         if (required_depth > file_struct->top_level)
817                 return NULL;    /* Not tall enough, so we can't find it */
818
819         /* Traverse down to level 0 */
820         while (level > 0 && tn) {
821                 tn = tn->internal[(chunk_id >>
822                                    (YAFFS_TNODES_LEVEL0_BITS +
823                                     (level - 1) *
824                                     YAFFS_TNODES_INTERNAL_BITS)) &
825                                   YAFFS_TNODES_INTERNAL_MASK];
826                 level--;
827         }
828
829         return tn;
830 }
831
832 /* add_find_tnode_0 finds the level 0 tnode if it exists,
833  * otherwise first expands the tree.
834  * This happens in two steps:
835  *  1. If the tree isn't tall enough, then make it taller.
836  *  2. Scan down the tree towards the level 0 tnode adding tnodes if required.
837  *
838  * Used when modifying the tree.
839  *
840  *  If the tn argument is NULL, then a fresh tnode will be added otherwise the
841  *  specified tn will be plugged into the ttree.
842  */
843
844 struct yaffs_tnode *yaffs_add_find_tnode_0(struct yaffs_dev *dev,
845                                            struct yaffs_file_var *file_struct,
846                                            u32 chunk_id,
847                                            struct yaffs_tnode *passed_tn)
848 {
849         int required_depth;
850         int i;
851         int l;
852         struct yaffs_tnode *tn;
853         u32 x;
854
855         /* Check sane level and page Id */
856         if (file_struct->top_level < 0 ||
857             file_struct->top_level > YAFFS_TNODES_MAX_LEVEL)
858                 return NULL;
859
860         if (chunk_id > YAFFS_MAX_CHUNK_ID)
861                 return NULL;
862
863         /* First check we're tall enough (ie enough top_level) */
864
865         x = chunk_id >> YAFFS_TNODES_LEVEL0_BITS;
866         required_depth = 0;
867         while (x) {
868                 x >>= YAFFS_TNODES_INTERNAL_BITS;
869                 required_depth++;
870         }
871
872         if (required_depth > file_struct->top_level) {
873                 /* Not tall enough, gotta make the tree taller */
874                 for (i = file_struct->top_level; i < required_depth; i++) {
875
876                         tn = yaffs_get_tnode(dev);
877
878                         if (tn) {
879                                 tn->internal[0] = file_struct->top;
880                                 file_struct->top = tn;
881                                 file_struct->top_level++;
882                         } else {
883                                 yaffs_trace(YAFFS_TRACE_ERROR,
884                                         "yaffs: no more tnodes");
885                                 return NULL;
886                         }
887                 }
888         }
889
890         /* Traverse down to level 0, adding anything we need */
891
892         l = file_struct->top_level;
893         tn = file_struct->top;
894
895         if (l > 0) {
896                 while (l > 0 && tn) {
897                         x = (chunk_id >>
898                              (YAFFS_TNODES_LEVEL0_BITS +
899                               (l - 1) * YAFFS_TNODES_INTERNAL_BITS)) &
900                             YAFFS_TNODES_INTERNAL_MASK;
901
902                         if ((l > 1) && !tn->internal[x]) {
903                                 /* Add missing non-level-zero tnode */
904                                 tn->internal[x] = yaffs_get_tnode(dev);
905                                 if (!tn->internal[x])
906                                         return NULL;
907                         } else if (l == 1) {
908                                 /* Looking from level 1 at level 0 */
909                                 if (passed_tn) {
910                                         /* If we already have one, release it */
911                                         if (tn->internal[x])
912                                                 yaffs_free_tnode(dev,
913                                                         tn->internal[x]);
914                                         tn->internal[x] = passed_tn;
915
916                                 } else if (!tn->internal[x]) {
917                                         /* Don't have one, none passed in */
918                                         tn->internal[x] = yaffs_get_tnode(dev);
919                                         if (!tn->internal[x])
920                                                 return NULL;
921                                 }
922                         }
923
924                         tn = tn->internal[x];
925                         l--;
926                 }
927         } else {
928                 /* top is level 0 */
929                 if (passed_tn) {
930                         memcpy(tn, passed_tn,
931                                (dev->tnode_width * YAFFS_NTNODES_LEVEL0) / 8);
932                         yaffs_free_tnode(dev, passed_tn);
933                 }
934         }
935
936         return tn;
937 }
938
939 static int yaffs_tags_match(const struct yaffs_ext_tags *tags, int obj_id,
940                             int chunk_obj)
941 {
942         return (tags->chunk_id == chunk_obj &&
943                 tags->obj_id == obj_id &&
944                 !tags->is_deleted) ? 1 : 0;
945
946 }
947
948 static int yaffs_find_chunk_in_group(struct yaffs_dev *dev, int the_chunk,
949                                         struct yaffs_ext_tags *tags, int obj_id,
950                                         int inode_chunk)
951 {
952         int j;
953
954         for (j = 0; the_chunk && j < dev->chunk_grp_size; j++) {
955                 if (yaffs_check_chunk_bit
956                     (dev, the_chunk / dev->param.chunks_per_block,
957                      the_chunk % dev->param.chunks_per_block)) {
958
959                         if (dev->chunk_grp_size == 1)
960                                 return the_chunk;
961                         else {
962                                 yaffs_rd_chunk_tags_nand(dev, the_chunk, NULL,
963                                                          tags);
964                                 if (yaffs_tags_match(tags,
965                                                         obj_id, inode_chunk)) {
966                                         /* found it; */
967                                         return the_chunk;
968                                 }
969                         }
970                 }
971                 the_chunk++;
972         }
973         return -1;
974 }
975
976 static int yaffs_find_chunk_in_file(struct yaffs_obj *in, int inode_chunk,
977                                     struct yaffs_ext_tags *tags)
978 {
979         /*Get the Tnode, then get the level 0 offset chunk offset */
980         struct yaffs_tnode *tn;
981         int the_chunk = -1;
982         struct yaffs_ext_tags local_tags;
983         int ret_val = -1;
984         struct yaffs_dev *dev = in->my_dev;
985
986         if (!tags) {
987                 /* Passed a NULL, so use our own tags space */
988                 tags = &local_tags;
989         }
990
991         tn = yaffs_find_tnode_0(dev, &in->variant.file_variant, inode_chunk);
992
993         if (!tn)
994                 return ret_val;
995
996         the_chunk = yaffs_get_group_base(dev, tn, inode_chunk);
997
998         ret_val = yaffs_find_chunk_in_group(dev, the_chunk, tags, in->obj_id,
999                                               inode_chunk);
1000         return ret_val;
1001 }
1002
1003 static int yaffs_find_del_file_chunk(struct yaffs_obj *in, int inode_chunk,
1004                                      struct yaffs_ext_tags *tags)
1005 {
1006         /* Get the Tnode, then get the level 0 offset chunk offset */
1007         struct yaffs_tnode *tn;
1008         int the_chunk = -1;
1009         struct yaffs_ext_tags local_tags;
1010         struct yaffs_dev *dev = in->my_dev;
1011         int ret_val = -1;
1012
1013         if (!tags) {
1014                 /* Passed a NULL, so use our own tags space */
1015                 tags = &local_tags;
1016         }
1017
1018         tn = yaffs_find_tnode_0(dev, &in->variant.file_variant, inode_chunk);
1019
1020         if (!tn)
1021                 return ret_val;
1022
1023         the_chunk = yaffs_get_group_base(dev, tn, inode_chunk);
1024
1025         ret_val = yaffs_find_chunk_in_group(dev, the_chunk, tags, in->obj_id,
1026                                               inode_chunk);
1027
1028         /* Delete the entry in the filestructure (if found) */
1029         if (ret_val != -1)
1030                 yaffs_load_tnode_0(dev, tn, inode_chunk, 0);
1031
1032         return ret_val;
1033 }
1034
1035 int yaffs_put_chunk_in_file(struct yaffs_obj *in, int inode_chunk,
1036                             int nand_chunk, int in_scan)
1037 {
1038         /* NB in_scan is zero unless scanning.
1039          * For forward scanning, in_scan is > 0;
1040          * for backward scanning in_scan is < 0
1041          *
1042          * nand_chunk = 0 is a dummy insert to make sure the tnodes are there.
1043          */
1044
1045         struct yaffs_tnode *tn;
1046         struct yaffs_dev *dev = in->my_dev;
1047         int existing_cunk;
1048         struct yaffs_ext_tags existing_tags;
1049         struct yaffs_ext_tags new_tags;
1050         unsigned existing_serial, new_serial;
1051
1052         if (in->variant_type != YAFFS_OBJECT_TYPE_FILE) {
1053                 /* Just ignore an attempt at putting a chunk into a non-file
1054                  * during scanning.
1055                  * If it is not during Scanning then something went wrong!
1056                  */
1057                 if (!in_scan) {
1058                         yaffs_trace(YAFFS_TRACE_ERROR,
1059                                 "yaffs tragedy:attempt to put data chunk into a non-file"
1060                                 );
1061                         BUG();
1062                 }
1063
1064                 yaffs_chunk_del(dev, nand_chunk, 1, __LINE__);
1065                 return YAFFS_OK;
1066         }
1067
1068         tn = yaffs_add_find_tnode_0(dev,
1069                                     &in->variant.file_variant,
1070                                     inode_chunk, NULL);
1071         if (!tn)
1072                 return YAFFS_FAIL;
1073
1074         if (!nand_chunk)
1075                 /* Dummy insert, bail now */
1076                 return YAFFS_OK;
1077
1078         existing_cunk = yaffs_get_group_base(dev, tn, inode_chunk);
1079
1080         if (in_scan != 0) {
1081                 /* If we're scanning then we need to test for duplicates
1082                  * NB This does not need to be efficient since it should only
1083                  * happen when the power fails during a write, then only one
1084                  * chunk should ever be affected.
1085                  *
1086                  * Correction for YAFFS2: This could happen quite a lot and we
1087                  * need to think about efficiency! TODO
1088                  * Update: For backward scanning we don't need to re-read tags
1089                  * so this is quite cheap.
1090                  */
1091
1092                 if (existing_cunk > 0) {
1093                         /* NB Right now existing chunk will not be real
1094                          * chunk_id if the chunk group size > 1
1095                          * thus we have to do a FindChunkInFile to get the
1096                          * real chunk id.
1097                          *
1098                          * We have a duplicate now we need to decide which
1099                          * one to use:
1100                          *
1101                          * Backwards scanning YAFFS2: The old one is what
1102                          * we use, dump the new one.
1103                          * YAFFS1: Get both sets of tags and compare serial
1104                          * numbers.
1105                          */
1106
1107                         if (in_scan > 0) {
1108                                 /* Only do this for forward scanning */
1109                                 yaffs_rd_chunk_tags_nand(dev,
1110                                                          nand_chunk,
1111                                                          NULL, &new_tags);
1112
1113                                 /* Do a proper find */
1114                                 existing_cunk =
1115                                     yaffs_find_chunk_in_file(in, inode_chunk,
1116                                                              &existing_tags);
1117                         }
1118
1119                         if (existing_cunk <= 0) {
1120                                 /*Hoosterman - how did this happen? */
1121
1122                                 yaffs_trace(YAFFS_TRACE_ERROR,
1123                                         "yaffs tragedy: existing chunk < 0 in scan"
1124                                         );
1125
1126                         }
1127
1128                         /* NB The deleted flags should be false, otherwise
1129                          * the chunks will not be loaded during a scan
1130                          */
1131
1132                         if (in_scan > 0) {
1133                                 new_serial = new_tags.serial_number;
1134                                 existing_serial = existing_tags.serial_number;
1135                         }
1136
1137                         if ((in_scan > 0) &&
1138                             (existing_cunk <= 0 ||
1139                              ((existing_serial + 1) & 3) == new_serial)) {
1140                                 /* Forward scanning.
1141                                  * Use new
1142                                  * Delete the old one and drop through to
1143                                  * update the tnode
1144                                  */
1145                                 yaffs_chunk_del(dev, existing_cunk, 1,
1146                                                 __LINE__);
1147                         } else {
1148                                 /* Backward scanning or we want to use the
1149                                  * existing one
1150                                  * Delete the new one and return early so that
1151                                  * the tnode isn't changed
1152                                  */
1153                                 yaffs_chunk_del(dev, nand_chunk, 1, __LINE__);
1154                                 return YAFFS_OK;
1155                         }
1156                 }
1157
1158         }
1159
1160         if (existing_cunk == 0)
1161                 in->n_data_chunks++;
1162
1163         yaffs_load_tnode_0(dev, tn, inode_chunk, nand_chunk);
1164
1165         return YAFFS_OK;
1166 }
1167
1168 static void yaffs_soft_del_chunk(struct yaffs_dev *dev, int chunk)
1169 {
1170         struct yaffs_block_info *the_block;
1171         unsigned block_no;
1172
1173         yaffs_trace(YAFFS_TRACE_DELETION, "soft delete chunk %d", chunk);
1174
1175         block_no = chunk / dev->param.chunks_per_block;
1176         the_block = yaffs_get_block_info(dev, block_no);
1177         if (the_block) {
1178                 the_block->soft_del_pages++;
1179                 dev->n_free_chunks++;
1180                 yaffs2_update_oldest_dirty_seq(dev, block_no, the_block);
1181         }
1182 }
1183
1184 /* SoftDeleteWorker scans backwards through the tnode tree and soft deletes all
1185  * the chunks in the file.
1186  * All soft deleting does is increment the block's softdelete count and pulls
1187  * the chunk out of the tnode.
1188  * Thus, essentially this is the same as DeleteWorker except that the chunks
1189  * are soft deleted.
1190  */
1191
1192 static int yaffs_soft_del_worker(struct yaffs_obj *in, struct yaffs_tnode *tn,
1193                                  u32 level, int chunk_offset)
1194 {
1195         int i;
1196         int the_chunk;
1197         int all_done = 1;
1198         struct yaffs_dev *dev = in->my_dev;
1199
1200         if (!tn)
1201                 return 1;
1202
1203         if (level > 0) {
1204                 for (i = YAFFS_NTNODES_INTERNAL - 1;
1205                         all_done && i >= 0;
1206                         i--) {
1207                         if (tn->internal[i]) {
1208                                 all_done =
1209                                     yaffs_soft_del_worker(in,
1210                                         tn->internal[i],
1211                                         level - 1,
1212                                         (chunk_offset <<
1213                                         YAFFS_TNODES_INTERNAL_BITS)
1214                                         + i);
1215                                 if (all_done) {
1216                                         yaffs_free_tnode(dev,
1217                                                 tn->internal[i]);
1218                                         tn->internal[i] = NULL;
1219                                 } else {
1220                                         /* Can this happen? */
1221                                 }
1222                         }
1223                 }
1224                 return (all_done) ? 1 : 0;
1225         }
1226
1227         /* level 0 */
1228          for (i = YAFFS_NTNODES_LEVEL0 - 1; i >= 0; i--) {
1229                 the_chunk = yaffs_get_group_base(dev, tn, i);
1230                 if (the_chunk) {
1231                         yaffs_soft_del_chunk(dev, the_chunk);
1232                         yaffs_load_tnode_0(dev, tn, i, 0);
1233                 }
1234         }
1235         return 1;
1236 }
1237
1238 static void yaffs_remove_obj_from_dir(struct yaffs_obj *obj)
1239 {
1240         struct yaffs_dev *dev = obj->my_dev;
1241         struct yaffs_obj *parent;
1242
1243         yaffs_verify_obj_in_dir(obj);
1244         parent = obj->parent;
1245
1246         yaffs_verify_dir(parent);
1247
1248         if (dev && dev->param.remove_obj_fn)
1249                 dev->param.remove_obj_fn(obj);
1250
1251         list_del_init(&obj->siblings);
1252         obj->parent = NULL;
1253
1254         yaffs_verify_dir(parent);
1255 }
1256
1257 void yaffs_add_obj_to_dir(struct yaffs_obj *directory, struct yaffs_obj *obj)
1258 {
1259         if (!directory) {
1260                 yaffs_trace(YAFFS_TRACE_ALWAYS,
1261                         "tragedy: Trying to add an object to a null pointer directory"
1262                         );
1263                 BUG();
1264                 return;
1265         }
1266         if (directory->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
1267                 yaffs_trace(YAFFS_TRACE_ALWAYS,
1268                         "tragedy: Trying to add an object to a non-directory"
1269                         );
1270                 BUG();
1271         }
1272
1273         if (obj->siblings.prev == NULL) {
1274                 /* Not initialised */
1275                 BUG();
1276         }
1277
1278         yaffs_verify_dir(directory);
1279
1280         yaffs_remove_obj_from_dir(obj);
1281
1282         /* Now add it */
1283         list_add(&obj->siblings, &directory->variant.dir_variant.children);
1284         obj->parent = directory;
1285
1286         if (directory == obj->my_dev->unlinked_dir
1287             || directory == obj->my_dev->del_dir) {
1288                 obj->unlinked = 1;
1289                 obj->my_dev->n_unlinked_files++;
1290                 obj->rename_allowed = 0;
1291         }
1292
1293         yaffs_verify_dir(directory);
1294         yaffs_verify_obj_in_dir(obj);
1295 }
1296
1297 static int yaffs_change_obj_name(struct yaffs_obj *obj,
1298                                  struct yaffs_obj *new_dir,
1299                                  const YCHAR *new_name, int force, int shadows)
1300 {
1301         int unlink_op;
1302         int del_op;
1303         struct yaffs_obj *existing_target;
1304
1305         if (new_dir == NULL)
1306                 new_dir = obj->parent;  /* use the old directory */
1307
1308         if (new_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
1309                 yaffs_trace(YAFFS_TRACE_ALWAYS,
1310                         "tragedy: yaffs_change_obj_name: new_dir is not a directory"
1311                         );
1312                 BUG();
1313         }
1314
1315         unlink_op = (new_dir == obj->my_dev->unlinked_dir);
1316         del_op = (new_dir == obj->my_dev->del_dir);
1317
1318         existing_target = yaffs_find_by_name(new_dir, new_name);
1319
1320         /* If the object is a file going into the unlinked directory,
1321          *   then it is OK to just stuff it in since duplicate names are OK.
1322          *   else only proceed if the new name does not exist and we're putting
1323          *   it into a directory.
1324          */
1325         if (!(unlink_op || del_op || force ||
1326               shadows > 0 || !existing_target) ||
1327               new_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY)
1328                 return YAFFS_FAIL;
1329
1330         yaffs_set_obj_name(obj, new_name);
1331         obj->dirty = 1;
1332         yaffs_add_obj_to_dir(new_dir, obj);
1333
1334         if (unlink_op)
1335                 obj->unlinked = 1;
1336
1337         /* If it is a deletion then we mark it as a shrink for gc  */
1338         if (yaffs_update_oh(obj, new_name, 0, del_op, shadows, NULL) >= 0)
1339                 return YAFFS_OK;
1340
1341         return YAFFS_FAIL;
1342 }
1343
1344 /*------------------------ Short Operations Cache ------------------------------
1345  *   In many situations where there is no high level buffering  a lot of
1346  *   reads might be short sequential reads, and a lot of writes may be short
1347  *   sequential writes. eg. scanning/writing a jpeg file.
1348  *   In these cases, a short read/write cache can provide a huge perfomance
1349  *   benefit with dumb-as-a-rock code.
1350  *   In Linux, the page cache provides read buffering and the short op cache
1351  *   provides write buffering.
1352  *
1353  *   There are a small number (~10) of cache chunks per device so that we don't
1354  *   need a very intelligent search.
1355  */
1356
1357 static int yaffs_obj_cache_dirty(struct yaffs_obj *obj)
1358 {
1359         struct yaffs_dev *dev = obj->my_dev;
1360         int i;
1361         struct yaffs_cache *cache;
1362         int n_caches = obj->my_dev->param.n_caches;
1363
1364         for (i = 0; i < n_caches; i++) {
1365                 cache = &dev->cache[i];
1366                 if (cache->object == obj && cache->dirty)
1367                         return 1;
1368         }
1369
1370         return 0;
1371 }
1372
1373 static void yaffs_flush_file_cache(struct yaffs_obj *obj)
1374 {
1375         struct yaffs_dev *dev = obj->my_dev;
1376         int lowest = -99;       /* Stop compiler whining. */
1377         int i;
1378         struct yaffs_cache *cache;
1379         int chunk_written = 0;
1380         int n_caches = obj->my_dev->param.n_caches;
1381
1382         if (n_caches < 1)
1383                 return;
1384         do {
1385                 cache = NULL;
1386
1387                 /* Find the lowest dirty chunk for this object */
1388                 for (i = 0; i < n_caches; i++) {
1389                         if (dev->cache[i].object == obj &&
1390                             dev->cache[i].dirty) {
1391                                 if (!cache ||
1392                                     dev->cache[i].chunk_id < lowest) {
1393                                         cache = &dev->cache[i];
1394                                         lowest = cache->chunk_id;
1395                                 }
1396                         }
1397                 }
1398
1399                 if (cache && !cache->locked) {
1400                         /* Write it out and free it up */
1401                         chunk_written =
1402                             yaffs_wr_data_obj(cache->object,
1403                                               cache->chunk_id,
1404                                               cache->data,
1405                                               cache->n_bytes, 1);
1406                         cache->dirty = 0;
1407                         cache->object = NULL;
1408                 }
1409         } while (cache && chunk_written > 0);
1410
1411         if (cache)
1412                 /* Hoosterman, disk full while writing cache out. */
1413                 yaffs_trace(YAFFS_TRACE_ERROR,
1414                         "yaffs tragedy: no space during cache write");
1415 }
1416
1417 /*yaffs_flush_whole_cache(dev)
1418  *
1419  *
1420  */
1421
1422 void yaffs_flush_whole_cache(struct yaffs_dev *dev)
1423 {
1424         struct yaffs_obj *obj;
1425         int n_caches = dev->param.n_caches;
1426         int i;
1427
1428         /* Find a dirty object in the cache and flush it...
1429          * until there are no further dirty objects.
1430          */
1431         do {
1432                 obj = NULL;
1433                 for (i = 0; i < n_caches && !obj; i++) {
1434                         if (dev->cache[i].object && dev->cache[i].dirty)
1435                                 obj = dev->cache[i].object;
1436                 }
1437                 if (obj)
1438                         yaffs_flush_file_cache(obj);
1439         } while (obj);
1440
1441 }
1442
1443 /* Grab us a cache chunk for use.
1444  * First look for an empty one.
1445  * Then look for the least recently used non-dirty one.
1446  * Then look for the least recently used dirty one...., flush and look again.
1447  */
1448 static struct yaffs_cache *yaffs_grab_chunk_worker(struct yaffs_dev *dev)
1449 {
1450         int i;
1451
1452         if (dev->param.n_caches > 0) {
1453                 for (i = 0; i < dev->param.n_caches; i++) {
1454                         if (!dev->cache[i].object)
1455                                 return &dev->cache[i];
1456                 }
1457         }
1458         return NULL;
1459 }
1460
1461 static struct yaffs_cache *yaffs_grab_chunk_cache(struct yaffs_dev *dev)
1462 {
1463         struct yaffs_cache *cache;
1464         struct yaffs_obj *the_obj;
1465         int usage;
1466         int i;
1467         int pushout;
1468
1469         if (dev->param.n_caches < 1)
1470                 return NULL;
1471
1472         /* Try find a non-dirty one... */
1473
1474         cache = yaffs_grab_chunk_worker(dev);
1475
1476         if (!cache) {
1477                 /* They were all dirty, find the LRU object and flush
1478                  * its cache, then  find again.
1479                  * NB what's here is not very accurate,
1480                  * we actually flush the object with the LRU chunk.
1481                  */
1482
1483                 /* With locking we can't assume we can use entry zero,
1484                  * Set the_obj to a valid pointer for Coverity. */
1485                 the_obj = dev->cache[0].object;
1486                 usage = -1;
1487                 cache = NULL;
1488                 pushout = -1;
1489
1490                 for (i = 0; i < dev->param.n_caches; i++) {
1491                         if (dev->cache[i].object &&
1492                             !dev->cache[i].locked &&
1493                             (dev->cache[i].last_use < usage ||
1494                             !cache)) {
1495                                 usage = dev->cache[i].last_use;
1496                                 the_obj = dev->cache[i].object;
1497                                 cache = &dev->cache[i];
1498                                 pushout = i;
1499                         }
1500                 }
1501
1502                 if (!cache || cache->dirty) {
1503                         /* Flush and try again */
1504                         yaffs_flush_file_cache(the_obj);
1505                         cache = yaffs_grab_chunk_worker(dev);
1506                 }
1507         }
1508         return cache;
1509 }
1510
1511 /* Find a cached chunk */
1512 static struct yaffs_cache *yaffs_find_chunk_cache(const struct yaffs_obj *obj,
1513                                                   int chunk_id)
1514 {
1515         struct yaffs_dev *dev = obj->my_dev;
1516         int i;
1517
1518         if (dev->param.n_caches < 1)
1519                 return NULL;
1520
1521         for (i = 0; i < dev->param.n_caches; i++) {
1522                 if (dev->cache[i].object == obj &&
1523                     dev->cache[i].chunk_id == chunk_id) {
1524                         dev->cache_hits++;
1525
1526                         return &dev->cache[i];
1527                 }
1528         }
1529         return NULL;
1530 }
1531
1532 /* Mark the chunk for the least recently used algorithym */
1533 static void yaffs_use_cache(struct yaffs_dev *dev, struct yaffs_cache *cache,
1534                             int is_write)
1535 {
1536         int i;
1537
1538         if (dev->param.n_caches < 1)
1539                 return;
1540
1541         if (dev->cache_last_use < 0 ||
1542                 dev->cache_last_use > 100000000) {
1543                 /* Reset the cache usages */
1544                 for (i = 1; i < dev->param.n_caches; i++)
1545                         dev->cache[i].last_use = 0;
1546
1547                 dev->cache_last_use = 0;
1548         }
1549         dev->cache_last_use++;
1550         cache->last_use = dev->cache_last_use;
1551
1552         if (is_write)
1553                 cache->dirty = 1;
1554 }
1555
1556 /* Invalidate a single cache page.
1557  * Do this when a whole page gets written,
1558  * ie the short cache for this page is no longer valid.
1559  */
1560 static void yaffs_invalidate_chunk_cache(struct yaffs_obj *object, int chunk_id)
1561 {
1562         struct yaffs_cache *cache;
1563
1564         if (object->my_dev->param.n_caches > 0) {
1565                 cache = yaffs_find_chunk_cache(object, chunk_id);
1566
1567                 if (cache)
1568                         cache->object = NULL;
1569         }
1570 }
1571
1572 /* Invalidate all the cache pages associated with this object
1573  * Do this whenever ther file is deleted or resized.
1574  */
1575 static void yaffs_invalidate_whole_cache(struct yaffs_obj *in)
1576 {
1577         int i;
1578         struct yaffs_dev *dev = in->my_dev;
1579
1580         if (dev->param.n_caches > 0) {
1581                 /* Invalidate it. */
1582                 for (i = 0; i < dev->param.n_caches; i++) {
1583                         if (dev->cache[i].object == in)
1584                                 dev->cache[i].object = NULL;
1585                 }
1586         }
1587 }
1588
1589 static void yaffs_unhash_obj(struct yaffs_obj *obj)
1590 {
1591         int bucket;
1592         struct yaffs_dev *dev = obj->my_dev;
1593
1594         /* If it is still linked into the bucket list, free from the list */
1595         if (!list_empty(&obj->hash_link)) {
1596                 list_del_init(&obj->hash_link);
1597                 bucket = yaffs_hash_fn(obj->obj_id);
1598                 dev->obj_bucket[bucket].count--;
1599         }
1600 }
1601
1602 /*  FreeObject frees up a Object and puts it back on the free list */
1603 static void yaffs_free_obj(struct yaffs_obj *obj)
1604 {
1605         struct yaffs_dev *dev;
1606
1607         if (!obj) {
1608                 BUG();
1609                 return;
1610         }
1611         dev = obj->my_dev;
1612         yaffs_trace(YAFFS_TRACE_OS, "FreeObject %p inode %p",
1613                 obj, obj->my_inode);
1614         if (obj->parent)
1615                 BUG();
1616         if (!list_empty(&obj->siblings))
1617                 BUG();
1618
1619         if (obj->my_inode) {
1620                 /* We're still hooked up to a cached inode.
1621                  * Don't delete now, but mark for later deletion
1622                  */
1623                 obj->defered_free = 1;
1624                 return;
1625         }
1626
1627         yaffs_unhash_obj(obj);
1628
1629         yaffs_free_raw_obj(dev, obj);
1630         dev->n_obj--;
1631         dev->checkpoint_blocks_required = 0;    /* force recalculation */
1632 }
1633
1634 void yaffs_handle_defered_free(struct yaffs_obj *obj)
1635 {
1636         if (obj->defered_free)
1637                 yaffs_free_obj(obj);
1638 }
1639
1640 static int yaffs_generic_obj_del(struct yaffs_obj *in)
1641 {
1642         /* Iinvalidate the file's data in the cache, without flushing. */
1643         yaffs_invalidate_whole_cache(in);
1644
1645         if (in->my_dev->param.is_yaffs2 && in->parent != in->my_dev->del_dir) {
1646                 /* Move to unlinked directory so we have a deletion record */
1647                 yaffs_change_obj_name(in, in->my_dev->del_dir, _Y("deleted"), 0,
1648                                       0);
1649         }
1650
1651         yaffs_remove_obj_from_dir(in);
1652         yaffs_chunk_del(in->my_dev, in->hdr_chunk, 1, __LINE__);
1653         in->hdr_chunk = 0;
1654
1655         yaffs_free_obj(in);
1656         return YAFFS_OK;
1657
1658 }
1659
1660 static void yaffs_soft_del_file(struct yaffs_obj *obj)
1661 {
1662         if (!obj->deleted ||
1663             obj->variant_type != YAFFS_OBJECT_TYPE_FILE ||
1664             obj->soft_del)
1665                 return;
1666
1667         if (obj->n_data_chunks <= 0) {
1668                 /* Empty file with no duplicate object headers,
1669                  * just delete it immediately */
1670                 yaffs_free_tnode(obj->my_dev, obj->variant.file_variant.top);
1671                 obj->variant.file_variant.top = NULL;
1672                 yaffs_trace(YAFFS_TRACE_TRACING,
1673                         "yaffs: Deleting empty file %d",
1674                         obj->obj_id);
1675                 yaffs_generic_obj_del(obj);
1676         } else {
1677                 yaffs_soft_del_worker(obj,
1678                                       obj->variant.file_variant.top,
1679                                       obj->variant.
1680                                       file_variant.top_level, 0);
1681                 obj->soft_del = 1;
1682         }
1683 }
1684
1685 /* Pruning removes any part of the file structure tree that is beyond the
1686  * bounds of the file (ie that does not point to chunks).
1687  *
1688  * A file should only get pruned when its size is reduced.
1689  *
1690  * Before pruning, the chunks must be pulled from the tree and the
1691  * level 0 tnode entries must be zeroed out.
1692  * Could also use this for file deletion, but that's probably better handled
1693  * by a special case.
1694  *
1695  * This function is recursive. For levels > 0 the function is called again on
1696  * any sub-tree. For level == 0 we just check if the sub-tree has data.
1697  * If there is no data in a subtree then it is pruned.
1698  */
1699
1700 static struct yaffs_tnode *yaffs_prune_worker(struct yaffs_dev *dev,
1701                                               struct yaffs_tnode *tn, u32 level,
1702                                               int del0)
1703 {
1704         int i;
1705         int has_data;
1706
1707         if (!tn)
1708                 return tn;
1709
1710         has_data = 0;
1711
1712         if (level > 0) {
1713                 for (i = 0; i < YAFFS_NTNODES_INTERNAL; i++) {
1714                         if (tn->internal[i]) {
1715                                 tn->internal[i] =
1716                                     yaffs_prune_worker(dev,
1717                                                 tn->internal[i],
1718                                                 level - 1,
1719                                                 (i == 0) ? del0 : 1);
1720                         }
1721
1722                         if (tn->internal[i])
1723                                 has_data++;
1724                 }
1725         } else {
1726                 int tnode_size_u32 = dev->tnode_size / sizeof(u32);
1727                 u32 *map = (u32 *) tn;
1728
1729                 for (i = 0; !has_data && i < tnode_size_u32; i++) {
1730                         if (map[i])
1731                                 has_data++;
1732                 }
1733         }
1734
1735         if (has_data == 0 && del0) {
1736                 /* Free and return NULL */
1737                 yaffs_free_tnode(dev, tn);
1738                 tn = NULL;
1739         }
1740         return tn;
1741 }
1742
1743 static int yaffs_prune_tree(struct yaffs_dev *dev,
1744                             struct yaffs_file_var *file_struct)
1745 {
1746         int i;
1747         int has_data;
1748         int done = 0;
1749         struct yaffs_tnode *tn;
1750
1751         if (file_struct->top_level < 1)
1752                 return YAFFS_OK;
1753
1754         file_struct->top =
1755            yaffs_prune_worker(dev, file_struct->top, file_struct->top_level, 0);
1756
1757         /* Now we have a tree with all the non-zero branches NULL but
1758          * the height is the same as it was.
1759          * Let's see if we can trim internal tnodes to shorten the tree.
1760          * We can do this if only the 0th element in the tnode is in use
1761          * (ie all the non-zero are NULL)
1762          */
1763
1764         while (file_struct->top_level && !done) {
1765                 tn = file_struct->top;
1766
1767                 has_data = 0;
1768                 for (i = 1; i < YAFFS_NTNODES_INTERNAL; i++) {
1769                         if (tn->internal[i])
1770                                 has_data++;
1771                 }
1772
1773                 if (!has_data) {
1774                         file_struct->top = tn->internal[0];
1775                         file_struct->top_level--;
1776                         yaffs_free_tnode(dev, tn);
1777                 } else {
1778                         done = 1;
1779                 }
1780         }
1781
1782         return YAFFS_OK;
1783 }
1784
1785 /*-------------------- End of File Structure functions.-------------------*/
1786
1787 /* alloc_empty_obj gets us a clean Object.*/
1788 static struct yaffs_obj *yaffs_alloc_empty_obj(struct yaffs_dev *dev)
1789 {
1790         struct yaffs_obj *obj = yaffs_alloc_raw_obj(dev);
1791
1792         if (!obj)
1793                 return obj;
1794
1795         dev->n_obj++;
1796
1797         /* Now sweeten it up... */
1798
1799         memset(obj, 0, sizeof(struct yaffs_obj));
1800         obj->being_created = 1;
1801
1802         obj->my_dev = dev;
1803         obj->hdr_chunk = 0;
1804         obj->variant_type = YAFFS_OBJECT_TYPE_UNKNOWN;
1805         INIT_LIST_HEAD(&(obj->hard_links));
1806         INIT_LIST_HEAD(&(obj->hash_link));
1807         INIT_LIST_HEAD(&obj->siblings);
1808
1809         /* Now make the directory sane */
1810         if (dev->root_dir) {
1811                 obj->parent = dev->root_dir;
1812                 list_add(&(obj->siblings),
1813                          &dev->root_dir->variant.dir_variant.children);
1814         }
1815
1816         /* Add it to the lost and found directory.
1817          * NB Can't put root or lost-n-found in lost-n-found so
1818          * check if lost-n-found exists first
1819          */
1820         if (dev->lost_n_found)
1821                 yaffs_add_obj_to_dir(dev->lost_n_found, obj);
1822
1823         obj->being_created = 0;
1824
1825         dev->checkpoint_blocks_required = 0;    /* force recalculation */
1826
1827         return obj;
1828 }
1829
1830 static int yaffs_find_nice_bucket(struct yaffs_dev *dev)
1831 {
1832         int i;
1833         int l = 999;
1834         int lowest = 999999;
1835
1836         /* Search for the shortest list or one that
1837          * isn't too long.
1838          */
1839
1840         for (i = 0; i < 10 && lowest > 4; i++) {
1841                 dev->bucket_finder++;
1842                 dev->bucket_finder %= YAFFS_NOBJECT_BUCKETS;
1843                 if (dev->obj_bucket[dev->bucket_finder].count < lowest) {
1844                         lowest = dev->obj_bucket[dev->bucket_finder].count;
1845                         l = dev->bucket_finder;
1846                 }
1847         }
1848
1849         return l;
1850 }
1851
1852 static int yaffs_new_obj_id(struct yaffs_dev *dev)
1853 {
1854         int bucket = yaffs_find_nice_bucket(dev);
1855         int found = 0;
1856         struct list_head *i;
1857         u32 n = (u32) bucket;
1858
1859         /* Now find an object value that has not already been taken
1860          * by scanning the list.
1861          */
1862
1863         while (!found) {
1864                 found = 1;
1865                 n += YAFFS_NOBJECT_BUCKETS;
1866                 if (1 || dev->obj_bucket[bucket].count > 0) {
1867                         list_for_each(i, &dev->obj_bucket[bucket].list) {
1868                                 /* If there is already one in the list */
1869                                 if (i && list_entry(i, struct yaffs_obj,
1870                                                     hash_link)->obj_id == n) {
1871                                         found = 0;
1872                                 }
1873                         }
1874                 }
1875         }
1876         return n;
1877 }
1878
1879 static void yaffs_hash_obj(struct yaffs_obj *in)
1880 {
1881         int bucket = yaffs_hash_fn(in->obj_id);
1882         struct yaffs_dev *dev = in->my_dev;
1883
1884         list_add(&in->hash_link, &dev->obj_bucket[bucket].list);
1885         dev->obj_bucket[bucket].count++;
1886 }
1887
1888 struct yaffs_obj *yaffs_find_by_number(struct yaffs_dev *dev, u32 number)
1889 {
1890         int bucket = yaffs_hash_fn(number);
1891         struct list_head *i;
1892         struct yaffs_obj *in;
1893
1894         list_for_each(i, &dev->obj_bucket[bucket].list) {
1895                 /* Look if it is in the list */
1896                 in = list_entry(i, struct yaffs_obj, hash_link);
1897                 if (in->obj_id == number) {
1898                         /* Don't show if it is defered free */
1899                         if (in->defered_free)
1900                                 return NULL;
1901                         return in;
1902                 }
1903         }
1904
1905         return NULL;
1906 }
1907
1908 static struct yaffs_obj *yaffs_new_obj(struct yaffs_dev *dev, int number,
1909                                 enum yaffs_obj_type type)
1910 {
1911         struct yaffs_obj *the_obj = NULL;
1912         struct yaffs_tnode *tn = NULL;
1913
1914         if (number < 0)
1915                 number = yaffs_new_obj_id(dev);
1916
1917         if (type == YAFFS_OBJECT_TYPE_FILE) {
1918                 tn = yaffs_get_tnode(dev);
1919                 if (!tn)
1920                         return NULL;
1921         }
1922
1923         the_obj = yaffs_alloc_empty_obj(dev);
1924         if (!the_obj) {
1925                 if (tn)
1926                         yaffs_free_tnode(dev, tn);
1927                 return NULL;
1928         }
1929
1930         the_obj->fake = 0;
1931         the_obj->rename_allowed = 1;
1932         the_obj->unlink_allowed = 1;
1933         the_obj->obj_id = number;
1934         yaffs_hash_obj(the_obj);
1935         the_obj->variant_type = type;
1936         yaffs_load_current_time(the_obj, 1, 1);
1937
1938         switch (type) {
1939         case YAFFS_OBJECT_TYPE_FILE:
1940                 the_obj->variant.file_variant.file_size = 0;
1941                 the_obj->variant.file_variant.scanned_size = 0;
1942                 the_obj->variant.file_variant.shrink_size =
1943                                                 yaffs_max_file_size(dev);
1944                 the_obj->variant.file_variant.top_level = 0;
1945                 the_obj->variant.file_variant.top = tn;
1946                 break;
1947         case YAFFS_OBJECT_TYPE_DIRECTORY:
1948                 INIT_LIST_HEAD(&the_obj->variant.dir_variant.children);
1949                 INIT_LIST_HEAD(&the_obj->variant.dir_variant.dirty);
1950                 break;
1951         case YAFFS_OBJECT_TYPE_SYMLINK:
1952         case YAFFS_OBJECT_TYPE_HARDLINK:
1953         case YAFFS_OBJECT_TYPE_SPECIAL:
1954                 /* No action required */
1955                 break;
1956         case YAFFS_OBJECT_TYPE_UNKNOWN:
1957                 /* todo this should not happen */
1958                 break;
1959         }
1960         return the_obj;
1961 }
1962
1963 static struct yaffs_obj *yaffs_create_fake_dir(struct yaffs_dev *dev,
1964                                                int number, u32 mode)
1965 {
1966
1967         struct yaffs_obj *obj =
1968             yaffs_new_obj(dev, number, YAFFS_OBJECT_TYPE_DIRECTORY);
1969
1970         if (!obj)
1971                 return NULL;
1972
1973         obj->fake = 1;  /* it is fake so it might not use NAND */
1974         obj->rename_allowed = 0;
1975         obj->unlink_allowed = 0;
1976         obj->deleted = 0;
1977         obj->unlinked = 0;
1978         obj->yst_mode = mode;
1979         obj->my_dev = dev;
1980         obj->hdr_chunk = 0;     /* Not a valid chunk. */
1981         return obj;
1982
1983 }
1984
1985
1986 static void yaffs_init_tnodes_and_objs(struct yaffs_dev *dev)
1987 {
1988         int i;
1989
1990         dev->n_obj = 0;
1991         dev->n_tnodes = 0;
1992         yaffs_init_raw_tnodes_and_objs(dev);
1993
1994         for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) {
1995                 INIT_LIST_HEAD(&dev->obj_bucket[i].list);
1996                 dev->obj_bucket[i].count = 0;
1997         }
1998 }
1999
2000 struct yaffs_obj *yaffs_find_or_create_by_number(struct yaffs_dev *dev,
2001                                                  int number,
2002                                                  enum yaffs_obj_type type)
2003 {
2004         struct yaffs_obj *the_obj = NULL;
2005
2006         if (number > 0)
2007                 the_obj = yaffs_find_by_number(dev, number);
2008
2009         if (!the_obj)
2010                 the_obj = yaffs_new_obj(dev, number, type);
2011
2012         return the_obj;
2013
2014 }
2015
2016 YCHAR *yaffs_clone_str(const YCHAR *str)
2017 {
2018         YCHAR *new_str = NULL;
2019         int len;
2020
2021         if (!str)
2022                 str = _Y("");
2023
2024         len = strnlen(str, YAFFS_MAX_ALIAS_LENGTH);
2025         new_str = kmalloc((len + 1) * sizeof(YCHAR), GFP_NOFS);
2026         if (new_str) {
2027                 strncpy(new_str, str, len);
2028                 new_str[len] = 0;
2029         }
2030         return new_str;
2031
2032 }
2033 /*
2034  *yaffs_update_parent() handles fixing a directories mtime and ctime when a new
2035  * link (ie. name) is created or deleted in the directory.
2036  *
2037  * ie.
2038  *   create dir/a : update dir's mtime/ctime
2039  *   rm dir/a:   update dir's mtime/ctime
2040  *   modify dir/a: don't update dir's mtimme/ctime
2041  *
2042  * This can be handled immediately or defered. Defering helps reduce the number
2043  * of updates when many files in a directory are changed within a brief period.
2044  *
2045  * If the directory updating is defered then yaffs_update_dirty_dirs must be
2046  * called periodically.
2047  */
2048
2049 static void yaffs_update_parent(struct yaffs_obj *obj)
2050 {
2051         struct yaffs_dev *dev;
2052
2053         if (!obj)
2054                 return;
2055         dev = obj->my_dev;
2056         obj->dirty = 1;
2057         yaffs_load_current_time(obj, 0, 1);
2058         if (dev->param.defered_dir_update) {
2059                 struct list_head *link = &obj->variant.dir_variant.dirty;
2060
2061                 if (list_empty(link)) {
2062                         list_add(link, &dev->dirty_dirs);
2063                         yaffs_trace(YAFFS_TRACE_BACKGROUND,
2064                           "Added object %d to dirty directories",
2065                            obj->obj_id);
2066                 }
2067
2068         } else {
2069                 yaffs_update_oh(obj, NULL, 0, 0, 0, NULL);
2070         }
2071 }
2072
2073 void yaffs_update_dirty_dirs(struct yaffs_dev *dev)
2074 {
2075         struct list_head *link;
2076         struct yaffs_obj *obj;
2077         struct yaffs_dir_var *d_s;
2078         union yaffs_obj_var *o_v;
2079
2080         yaffs_trace(YAFFS_TRACE_BACKGROUND, "Update dirty directories");
2081
2082         while (!list_empty(&dev->dirty_dirs)) {
2083                 link = dev->dirty_dirs.next;
2084                 list_del_init(link);
2085
2086                 d_s = list_entry(link, struct yaffs_dir_var, dirty);
2087                 o_v = list_entry(d_s, union yaffs_obj_var, dir_variant);
2088                 obj = list_entry(o_v, struct yaffs_obj, variant);
2089
2090                 yaffs_trace(YAFFS_TRACE_BACKGROUND, "Update directory %d",
2091                         obj->obj_id);
2092
2093                 if (obj->dirty)
2094                         yaffs_update_oh(obj, NULL, 0, 0, 0, NULL);
2095         }
2096 }
2097
2098 /*
2099  * Mknod (create) a new object.
2100  * equiv_obj only has meaning for a hard link;
2101  * alias_str only has meaning for a symlink.
2102  * rdev only has meaning for devices (a subset of special objects)
2103  */
2104
2105 static struct yaffs_obj *yaffs_create_obj(enum yaffs_obj_type type,
2106                                           struct yaffs_obj *parent,
2107                                           const YCHAR *name,
2108                                           u32 mode,
2109                                           u32 uid,
2110                                           u32 gid,
2111                                           struct yaffs_obj *equiv_obj,
2112                                           const YCHAR *alias_str, u32 rdev)
2113 {
2114         struct yaffs_obj *in;
2115         YCHAR *str = NULL;
2116         struct yaffs_dev *dev = parent->my_dev;
2117
2118         /* Check if the entry exists.
2119          * If it does then fail the call since we don't want a dup. */
2120         if (yaffs_find_by_name(parent, name))
2121                 return NULL;
2122
2123         if (type == YAFFS_OBJECT_TYPE_SYMLINK) {
2124                 str = yaffs_clone_str(alias_str);
2125                 if (!str)
2126                         return NULL;
2127         }
2128
2129         in = yaffs_new_obj(dev, -1, type);
2130
2131         if (!in) {
2132                 kfree(str);
2133                 return NULL;
2134         }
2135
2136         in->hdr_chunk = 0;
2137         in->valid = 1;
2138         in->variant_type = type;
2139
2140         in->yst_mode = mode;
2141
2142         yaffs_attribs_init(in, gid, uid, rdev);
2143
2144         in->n_data_chunks = 0;
2145
2146         yaffs_set_obj_name(in, name);
2147         in->dirty = 1;
2148
2149         yaffs_add_obj_to_dir(parent, in);
2150
2151         in->my_dev = parent->my_dev;
2152
2153         switch (type) {
2154         case YAFFS_OBJECT_TYPE_SYMLINK:
2155                 in->variant.symlink_variant.alias = str;
2156                 break;
2157         case YAFFS_OBJECT_TYPE_HARDLINK:
2158                 in->variant.hardlink_variant.equiv_obj = equiv_obj;
2159                 in->variant.hardlink_variant.equiv_id = equiv_obj->obj_id;
2160                 list_add(&in->hard_links, &equiv_obj->hard_links);
2161                 break;
2162         case YAFFS_OBJECT_TYPE_FILE:
2163         case YAFFS_OBJECT_TYPE_DIRECTORY:
2164         case YAFFS_OBJECT_TYPE_SPECIAL:
2165         case YAFFS_OBJECT_TYPE_UNKNOWN:
2166                 /* do nothing */
2167                 break;
2168         }
2169
2170         if (yaffs_update_oh(in, name, 0, 0, 0, NULL) < 0) {
2171                 /* Could not create the object header, fail */
2172                 yaffs_del_obj(in);
2173                 in = NULL;
2174         }
2175
2176         if (in)
2177                 yaffs_update_parent(parent);
2178
2179         return in;
2180 }
2181
2182 struct yaffs_obj *yaffs_create_file(struct yaffs_obj *parent,
2183                                     const YCHAR *name, u32 mode, u32 uid,
2184                                     u32 gid)
2185 {
2186         return yaffs_create_obj(YAFFS_OBJECT_TYPE_FILE, parent, name, mode,
2187                                 uid, gid, NULL, NULL, 0);
2188 }
2189
2190 struct yaffs_obj *yaffs_create_dir(struct yaffs_obj *parent, const YCHAR *name,
2191                                    u32 mode, u32 uid, u32 gid)
2192 {
2193         return yaffs_create_obj(YAFFS_OBJECT_TYPE_DIRECTORY, parent, name,
2194                                 mode, uid, gid, NULL, NULL, 0);
2195 }
2196
2197 struct yaffs_obj *yaffs_create_special(struct yaffs_obj *parent,
2198                                        const YCHAR *name, u32 mode, u32 uid,
2199                                        u32 gid, u32 rdev)
2200 {
2201         return yaffs_create_obj(YAFFS_OBJECT_TYPE_SPECIAL, parent, name, mode,
2202                                 uid, gid, NULL, NULL, rdev);
2203 }
2204
2205 struct yaffs_obj *yaffs_create_symlink(struct yaffs_obj *parent,
2206                                        const YCHAR *name, u32 mode, u32 uid,
2207                                        u32 gid, const YCHAR *alias)
2208 {
2209         return yaffs_create_obj(YAFFS_OBJECT_TYPE_SYMLINK, parent, name, mode,
2210                                 uid, gid, NULL, alias, 0);
2211 }
2212
2213 /* yaffs_link_obj returns the object id of the equivalent object.*/
2214 struct yaffs_obj *yaffs_link_obj(struct yaffs_obj *parent, const YCHAR * name,
2215                                  struct yaffs_obj *equiv_obj)
2216 {
2217         /* Get the real object in case we were fed a hard link obj */
2218         equiv_obj = yaffs_get_equivalent_obj(equiv_obj);
2219
2220         if (yaffs_create_obj(YAFFS_OBJECT_TYPE_HARDLINK,
2221                         parent, name, 0, 0, 0,
2222                         equiv_obj, NULL, 0))
2223                 return equiv_obj;
2224
2225         return NULL;
2226
2227 }
2228
2229
2230
2231 /*---------------------- Block Management and Page Allocation -------------*/
2232
2233 static void yaffs_deinit_blocks(struct yaffs_dev *dev)
2234 {
2235         if (dev->block_info_alt && dev->block_info)
2236                 vfree(dev->block_info);
2237         else
2238                 kfree(dev->block_info);
2239
2240         dev->block_info_alt = 0;
2241
2242         dev->block_info = NULL;
2243
2244         if (dev->chunk_bits_alt && dev->chunk_bits)
2245                 vfree(dev->chunk_bits);
2246         else
2247                 kfree(dev->chunk_bits);
2248         dev->chunk_bits_alt = 0;
2249         dev->chunk_bits = NULL;
2250 }
2251
2252 static int yaffs_init_blocks(struct yaffs_dev *dev)
2253 {
2254         int n_blocks = dev->internal_end_block - dev->internal_start_block + 1;
2255
2256         dev->block_info = NULL;
2257         dev->chunk_bits = NULL;
2258         dev->alloc_block = -1;  /* force it to get a new one */
2259
2260         /* If the first allocation strategy fails, thry the alternate one */
2261         dev->block_info =
2262                 kmalloc(n_blocks * sizeof(struct yaffs_block_info), GFP_NOFS);
2263         if (!dev->block_info) {
2264                 dev->block_info =
2265                     vmalloc(n_blocks * sizeof(struct yaffs_block_info));
2266                 dev->block_info_alt = 1;
2267         } else {
2268                 dev->block_info_alt = 0;
2269         }
2270
2271         if (!dev->block_info)
2272                 goto alloc_error;
2273
2274         /* Set up dynamic blockinfo stuff. Round up bytes. */
2275         dev->chunk_bit_stride = (dev->param.chunks_per_block + 7) / 8;
2276         dev->chunk_bits =
2277                 kmalloc(dev->chunk_bit_stride * n_blocks, GFP_NOFS);
2278         if (!dev->chunk_bits) {
2279                 dev->chunk_bits =
2280                     vmalloc(dev->chunk_bit_stride * n_blocks);
2281                 dev->chunk_bits_alt = 1;
2282         } else {
2283                 dev->chunk_bits_alt = 0;
2284         }
2285         if (!dev->chunk_bits)
2286                 goto alloc_error;
2287
2288
2289         memset(dev->block_info, 0, n_blocks * sizeof(struct yaffs_block_info));
2290         memset(dev->chunk_bits, 0, dev->chunk_bit_stride * n_blocks);
2291         return YAFFS_OK;
2292
2293 alloc_error:
2294         yaffs_deinit_blocks(dev);
2295         return YAFFS_FAIL;
2296 }
2297
2298
2299 void yaffs_block_became_dirty(struct yaffs_dev *dev, int block_no)
2300 {
2301         struct yaffs_block_info *bi = yaffs_get_block_info(dev, block_no);
2302         int erased_ok = 0;
2303         int i;
2304
2305         /* If the block is still healthy erase it and mark as clean.
2306          * If the block has had a data failure, then retire it.
2307          */
2308
2309         yaffs_trace(YAFFS_TRACE_GC | YAFFS_TRACE_ERASE,
2310                 "yaffs_block_became_dirty block %d state %d %s",
2311                 block_no, bi->block_state,
2312                 (bi->needs_retiring) ? "needs retiring" : "");
2313
2314         yaffs2_clear_oldest_dirty_seq(dev, bi);
2315
2316         bi->block_state = YAFFS_BLOCK_STATE_DIRTY;
2317
2318         /* If this is the block being garbage collected then stop gc'ing */
2319         if (block_no == dev->gc_block)
2320                 dev->gc_block = 0;
2321
2322         /* If this block is currently the best candidate for gc
2323          * then drop as a candidate */
2324         if (block_no == dev->gc_dirtiest) {
2325                 dev->gc_dirtiest = 0;
2326                 dev->gc_pages_in_use = 0;
2327         }
2328
2329         if (!bi->needs_retiring) {
2330                 yaffs2_checkpt_invalidate(dev);
2331                 erased_ok = yaffs_erase_block(dev, block_no);
2332                 if (!erased_ok) {
2333                         dev->n_erase_failures++;
2334                         yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
2335                           "**>> Erasure failed %d", block_no);
2336                 }
2337         }
2338
2339         /* Verify erasure if needed */
2340         if (erased_ok &&
2341             ((yaffs_trace_mask & YAFFS_TRACE_ERASE) ||
2342              !yaffs_skip_verification(dev))) {
2343                 for (i = 0; i < dev->param.chunks_per_block; i++) {
2344                         if (!yaffs_check_chunk_erased(dev,
2345                                 block_no * dev->param.chunks_per_block + i)) {
2346                                 yaffs_trace(YAFFS_TRACE_ERROR,
2347                                         ">>Block %d erasure supposedly OK, but chunk %d not erased",
2348                                         block_no, i);
2349                         }
2350                 }
2351         }
2352
2353         if (!erased_ok) {
2354                 /* We lost a block of free space */
2355                 dev->n_free_chunks -= dev->param.chunks_per_block;
2356                 yaffs_retire_block(dev, block_no);
2357                 yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
2358                         "**>> Block %d retired", block_no);
2359                 return;
2360         }
2361
2362         /* Clean it up... */
2363         bi->block_state = YAFFS_BLOCK_STATE_EMPTY;
2364         bi->seq_number = 0;
2365         dev->n_erased_blocks++;
2366         bi->pages_in_use = 0;
2367         bi->soft_del_pages = 0;
2368         bi->has_shrink_hdr = 0;
2369         bi->skip_erased_check = 1;      /* Clean, so no need to check */
2370         bi->gc_prioritise = 0;
2371         bi->has_summary = 0;
2372
2373         yaffs_clear_chunk_bits(dev, block_no);
2374
2375         yaffs_trace(YAFFS_TRACE_ERASE, "Erased block %d", block_no);
2376 }
2377
2378 static inline int yaffs_gc_process_chunk(struct yaffs_dev *dev,
2379                                         struct yaffs_block_info *bi,
2380                                         int old_chunk, u8 *buffer)
2381 {
2382         int new_chunk;
2383         int mark_flash = 1;
2384         struct yaffs_ext_tags tags;
2385         struct yaffs_obj *object;
2386         int matching_chunk;
2387         int ret_val = YAFFS_OK;
2388
2389         memset(&tags, 0, sizeof(tags));
2390         yaffs_rd_chunk_tags_nand(dev, old_chunk,
2391                                  buffer, &tags);
2392         object = yaffs_find_by_number(dev, tags.obj_id);
2393
2394         yaffs_trace(YAFFS_TRACE_GC_DETAIL,
2395                 "Collecting chunk in block %d, %d %d %d ",
2396                 dev->gc_chunk, tags.obj_id,
2397                 tags.chunk_id, tags.n_bytes);
2398
2399         if (object && !yaffs_skip_verification(dev)) {
2400                 if (tags.chunk_id == 0)
2401                         matching_chunk =
2402                             object->hdr_chunk;
2403                 else if (object->soft_del)
2404                         /* Defeat the test */
2405                         matching_chunk = old_chunk;
2406                 else
2407                         matching_chunk =
2408                             yaffs_find_chunk_in_file
2409                             (object, tags.chunk_id,
2410                              NULL);
2411
2412                 if (old_chunk != matching_chunk)
2413                         yaffs_trace(YAFFS_TRACE_ERROR,
2414                                 "gc: page in gc mismatch: %d %d %d %d",
2415                                 old_chunk,
2416                                 matching_chunk,
2417                                 tags.obj_id,
2418                                 tags.chunk_id);
2419         }
2420
2421         if (!object) {
2422                 yaffs_trace(YAFFS_TRACE_ERROR,
2423                         "page %d in gc has no object: %d %d %d ",
2424                         old_chunk,
2425                         tags.obj_id, tags.chunk_id,
2426                         tags.n_bytes);
2427         }
2428
2429         if (object &&
2430             object->deleted &&
2431             object->soft_del && tags.chunk_id != 0) {
2432                 /* Data chunk in a soft deleted file,
2433                  * throw it away.
2434                  * It's a soft deleted data chunk,
2435                  * No need to copy this, just forget
2436                  * about it and fix up the object.
2437                  */
2438
2439                 /* Free chunks already includes
2440                  * softdeleted chunks, how ever this
2441                  * chunk is going to soon be really
2442                  * deleted which will increment free
2443                  * chunks. We have to decrement free
2444                  * chunks so this works out properly.
2445                  */
2446                 dev->n_free_chunks--;
2447                 bi->soft_del_pages--;
2448
2449                 object->n_data_chunks--;
2450                 if (object->n_data_chunks <= 0) {
2451                         /* remeber to clean up obj */
2452                         dev->gc_cleanup_list[dev->n_clean_ups] = tags.obj_id;
2453                         dev->n_clean_ups++;
2454                 }
2455                 mark_flash = 0;
2456         } else if (object) {
2457                 /* It's either a data chunk in a live
2458                  * file or an ObjectHeader, so we're
2459                  * interested in it.
2460                  * NB Need to keep the ObjectHeaders of
2461                  * deleted files until the whole file
2462                  * has been deleted off
2463                  */
2464                 tags.serial_number++;
2465                 dev->n_gc_copies++;
2466
2467                 if (tags.chunk_id == 0) {
2468                         /* It is an object Id,
2469                          * We need to nuke the
2470                          * shrinkheader flags since its
2471                          * work is done.
2472                          * Also need to clean up
2473                          * shadowing.
2474                          */
2475                         struct yaffs_obj_hdr *oh;
2476                         oh = (struct yaffs_obj_hdr *) buffer;
2477
2478                         oh->is_shrink = 0;
2479                         tags.extra_is_shrink = 0;
2480                         oh->shadows_obj = 0;
2481                         oh->inband_shadowed_obj_id = 0;
2482                         tags.extra_shadows = 0;
2483
2484                         /* Update file size */
2485                         if (object->variant_type == YAFFS_OBJECT_TYPE_FILE) {
2486                                 yaffs_oh_size_load(oh,
2487                                     object->variant.file_variant.file_size);
2488                                 tags.extra_file_size =
2489                                     object->variant.file_variant.file_size;
2490                         }
2491
2492                         yaffs_verify_oh(object, oh, &tags, 1);
2493                         new_chunk =
2494                             yaffs_write_new_chunk(dev, (u8 *) oh, &tags, 1);
2495                 } else {
2496                         new_chunk =
2497                             yaffs_write_new_chunk(dev, buffer, &tags, 1);
2498                 }
2499
2500                 if (new_chunk < 0) {
2501                         ret_val = YAFFS_FAIL;
2502                 } else {
2503
2504                         /* Now fix up the Tnodes etc. */
2505
2506                         if (tags.chunk_id == 0) {
2507                                 /* It's a header */
2508                                 object->hdr_chunk = new_chunk;
2509                                 object->serial = tags.serial_number;
2510                         } else {
2511                                 /* It's a data chunk */
2512                                 yaffs_put_chunk_in_file(object, tags.chunk_id,
2513                                                         new_chunk, 0);
2514                         }
2515                 }
2516         }
2517         if (ret_val == YAFFS_OK)
2518                 yaffs_chunk_del(dev, old_chunk, mark_flash, __LINE__);
2519         return ret_val;
2520 }
2521
2522 static int yaffs_gc_block(struct yaffs_dev *dev, int block, int whole_block)
2523 {
2524         int old_chunk;
2525         int ret_val = YAFFS_OK;
2526         int i;
2527         int is_checkpt_block;
2528         int max_copies;
2529         int chunks_before = yaffs_get_erased_chunks(dev);
2530         int chunks_after;
2531         struct yaffs_block_info *bi = yaffs_get_block_info(dev, block);
2532
2533         is_checkpt_block = (bi->block_state == YAFFS_BLOCK_STATE_CHECKPOINT);
2534
2535         yaffs_trace(YAFFS_TRACE_TRACING,
2536                 "Collecting block %d, in use %d, shrink %d, whole_block %d",
2537                 block, bi->pages_in_use, bi->has_shrink_hdr,
2538                 whole_block);
2539
2540         /*yaffs_verify_free_chunks(dev); */
2541
2542         if (bi->block_state == YAFFS_BLOCK_STATE_FULL)
2543                 bi->block_state = YAFFS_BLOCK_STATE_COLLECTING;
2544
2545         bi->has_shrink_hdr = 0; /* clear the flag so that the block can erase */
2546
2547         dev->gc_disable = 1;
2548
2549         yaffs_summary_gc(dev, block);
2550
2551         if (is_checkpt_block || !yaffs_still_some_chunks(dev, block)) {
2552                 yaffs_trace(YAFFS_TRACE_TRACING,
2553                         "Collecting block %d that has no chunks in use",
2554                         block);
2555                 yaffs_block_became_dirty(dev, block);
2556         } else {
2557
2558                 u8 *buffer = yaffs_get_temp_buffer(dev);
2559
2560                 yaffs_verify_blk(dev, bi, block);
2561
2562                 max_copies = (whole_block) ? dev->param.chunks_per_block : 5;
2563                 old_chunk = block * dev->param.chunks_per_block + dev->gc_chunk;
2564
2565                 for (/* init already done */ ;
2566                      ret_val == YAFFS_OK &&
2567                      dev->gc_chunk < dev->param.chunks_per_block &&
2568                      (bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) &&
2569                      max_copies > 0;
2570                      dev->gc_chunk++, old_chunk++) {
2571                         if (yaffs_check_chunk_bit(dev, block, dev->gc_chunk)) {
2572                                 /* Page is in use and might need to be copied */
2573                                 max_copies--;
2574                                 ret_val = yaffs_gc_process_chunk(dev, bi,
2575                                                         old_chunk, buffer);
2576                         }
2577                 }
2578                 yaffs_release_temp_buffer(dev, buffer);
2579         }
2580
2581         yaffs_verify_collected_blk(dev, bi, block);
2582
2583         if (bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) {
2584                 /*
2585                  * The gc did not complete. Set block state back to FULL
2586                  * because checkpointing does not restore gc.
2587                  */
2588                 bi->block_state = YAFFS_BLOCK_STATE_FULL;
2589         } else {
2590                 /* The gc completed. */
2591                 /* Do any required cleanups */
2592                 for (i = 0; i < dev->n_clean_ups; i++) {
2593                         /* Time to delete the file too */
2594                         struct yaffs_obj *object =
2595                             yaffs_find_by_number(dev, dev->gc_cleanup_list[i]);
2596                         if (object) {
2597                                 yaffs_free_tnode(dev,
2598                                           object->variant.file_variant.top);
2599                                 object->variant.file_variant.top = NULL;
2600                                 yaffs_trace(YAFFS_TRACE_GC,
2601                                         "yaffs: About to finally delete object %d",
2602                                         object->obj_id);
2603                                 yaffs_generic_obj_del(object);
2604                                 object->my_dev->n_deleted_files--;
2605                         }
2606
2607                 }
2608                 chunks_after = yaffs_get_erased_chunks(dev);
2609                 if (chunks_before >= chunks_after)
2610                         yaffs_trace(YAFFS_TRACE_GC,
2611                                 "gc did not increase free chunks before %d after %d",
2612                                 chunks_before, chunks_after);
2613                 dev->gc_block = 0;
2614                 dev->gc_chunk = 0;
2615                 dev->n_clean_ups = 0;
2616         }
2617
2618         dev->gc_disable = 0;
2619
2620         return ret_val;
2621 }
2622
2623 /*
2624  * find_gc_block() selects the dirtiest block (or close enough)
2625  * for garbage collection.
2626  */
2627
2628 static unsigned yaffs_find_gc_block(struct yaffs_dev *dev,
2629                                     int aggressive, int background)
2630 {
2631         int i;
2632         int iterations;
2633         unsigned selected = 0;
2634         int prioritised = 0;
2635         int prioritised_exist = 0;
2636         struct yaffs_block_info *bi;
2637         int threshold;
2638
2639         /* First let's see if we need to grab a prioritised block */
2640         if (dev->has_pending_prioritised_gc && !aggressive) {
2641                 dev->gc_dirtiest = 0;
2642                 bi = dev->block_info;
2643                 for (i = dev->internal_start_block;
2644                      i <= dev->internal_end_block && !selected; i++) {
2645
2646                         if (bi->gc_prioritise) {
2647                                 prioritised_exist = 1;
2648                                 if (bi->block_state == YAFFS_BLOCK_STATE_FULL &&
2649                                     yaffs_block_ok_for_gc(dev, bi)) {
2650                                         selected = i;
2651                                         prioritised = 1;
2652                                 }
2653                         }
2654                         bi++;
2655                 }
2656
2657                 /*
2658                  * If there is a prioritised block and none was selected then
2659                  * this happened because there is at least one old dirty block
2660                  * gumming up the works. Let's gc the oldest dirty block.
2661                  */
2662
2663                 if (prioritised_exist &&
2664                     !selected && dev->oldest_dirty_block > 0)
2665                         selected = dev->oldest_dirty_block;
2666
2667                 if (!prioritised_exist) /* None found, so we can clear this */
2668                         dev->has_pending_prioritised_gc = 0;
2669         }
2670
2671         /* If we're doing aggressive GC then we are happy to take a less-dirty
2672          * block, and search harder.
2673          * else (leasurely gc), then we only bother to do this if the
2674          * block has only a few pages in use.
2675          */
2676
2677         if (!selected) {
2678                 int pages_used;
2679                 int n_blocks =
2680                     dev->internal_end_block - dev->internal_start_block + 1;
2681                 if (aggressive) {
2682                         threshold = dev->param.chunks_per_block;
2683                         iterations = n_blocks;
2684                 } else {
2685                         int max_threshold;
2686
2687                         if (background)
2688                                 max_threshold = dev->param.chunks_per_block / 2;
2689                         else
2690                                 max_threshold = dev->param.chunks_per_block / 8;
2691
2692                         if (max_threshold < YAFFS_GC_PASSIVE_THRESHOLD)
2693                                 max_threshold = YAFFS_GC_PASSIVE_THRESHOLD;
2694
2695                         threshold = background ? (dev->gc_not_done + 2) * 2 : 0;
2696                         if (threshold < YAFFS_GC_PASSIVE_THRESHOLD)
2697                                 threshold = YAFFS_GC_PASSIVE_THRESHOLD;
2698                         if (threshold > max_threshold)
2699                                 threshold = max_threshold;
2700
2701                         iterations = n_blocks / 16 + 1;
2702                         if (iterations > 100)
2703                                 iterations = 100;
2704                 }
2705
2706                 for (i = 0;
2707                      i < iterations &&
2708                      (dev->gc_dirtiest < 1 ||
2709                       dev->gc_pages_in_use > YAFFS_GC_GOOD_ENOUGH);
2710                      i++) {
2711                         dev->gc_block_finder++;
2712                         if (dev->gc_block_finder < dev->internal_start_block ||
2713                             dev->gc_block_finder > dev->internal_end_block)
2714                                 dev->gc_block_finder =
2715                                     dev->internal_start_block;
2716
2717                         bi = yaffs_get_block_info(dev, dev->gc_block_finder);
2718
2719                         pages_used = bi->pages_in_use - bi->soft_del_pages;
2720
2721                         if (bi->block_state == YAFFS_BLOCK_STATE_FULL &&
2722                             pages_used < dev->param.chunks_per_block &&
2723                             (dev->gc_dirtiest < 1 ||
2724                              pages_used < dev->gc_pages_in_use) &&
2725                             yaffs_block_ok_for_gc(dev, bi)) {
2726                                 dev->gc_dirtiest = dev->gc_block_finder;
2727                                 dev->gc_pages_in_use = pages_used;
2728                         }
2729                 }
2730
2731                 if (dev->gc_dirtiest > 0 && dev->gc_pages_in_use <= threshold)
2732                         selected = dev->gc_dirtiest;
2733         }
2734
2735         /*
2736          * If nothing has been selected for a while, try the oldest dirty
2737          * because that's gumming up the works.
2738          */
2739
2740         if (!selected && dev->param.is_yaffs2 &&
2741             dev->gc_not_done >= (background ? 10 : 20)) {
2742                 yaffs2_find_oldest_dirty_seq(dev);
2743                 if (dev->oldest_dirty_block > 0) {
2744                         selected = dev->oldest_dirty_block;
2745                         dev->gc_dirtiest = selected;
2746                         dev->oldest_dirty_gc_count++;
2747                         bi = yaffs_get_block_info(dev, selected);
2748                         dev->gc_pages_in_use =
2749                             bi->pages_in_use - bi->soft_del_pages;
2750                 } else {
2751                         dev->gc_not_done = 0;
2752                 }
2753         }
2754
2755         if (selected) {
2756                 yaffs_trace(YAFFS_TRACE_GC,
2757                         "GC Selected block %d with %d free, prioritised:%d",
2758                         selected,
2759                         dev->param.chunks_per_block - dev->gc_pages_in_use,
2760                         prioritised);
2761
2762                 dev->n_gc_blocks++;
2763                 if (background)
2764                         dev->bg_gcs++;
2765
2766                 dev->gc_dirtiest = 0;
2767                 dev->gc_pages_in_use = 0;
2768                 dev->gc_not_done = 0;
2769                 if (dev->refresh_skip > 0)
2770                         dev->refresh_skip--;
2771         } else {
2772                 dev->gc_not_done++;
2773                 yaffs_trace(YAFFS_TRACE_GC,
2774                         "GC none: finder %d skip %d threshold %d dirtiest %d using %d oldest %d%s",
2775                         dev->gc_block_finder, dev->gc_not_done, threshold,
2776                         dev->gc_dirtiest, dev->gc_pages_in_use,
2777                         dev->oldest_dirty_block, background ? " bg" : "");
2778         }
2779
2780         return selected;
2781 }
2782
2783 /* New garbage collector
2784  * If we're very low on erased blocks then we do aggressive garbage collection
2785  * otherwise we do "leasurely" garbage collection.
2786  * Aggressive gc looks further (whole array) and will accept less dirty blocks.
2787  * Passive gc only inspects smaller areas and only accepts more dirty blocks.
2788  *
2789  * The idea is to help clear out space in a more spread-out manner.
2790  * Dunno if it really does anything useful.
2791  */
2792 static int yaffs_check_gc(struct yaffs_dev *dev, int background)
2793 {
2794         int aggressive = 0;
2795         int gc_ok = YAFFS_OK;
2796         int max_tries = 0;
2797         int min_erased;
2798         int erased_chunks;
2799         int checkpt_block_adjust;
2800
2801         if (dev->param.gc_control_fn &&
2802                 (dev->param.gc_control_fn(dev) & 1) == 0)
2803                 return YAFFS_OK;
2804
2805         if (dev->gc_disable)
2806                 /* Bail out so we don't get recursive gc */
2807                 return YAFFS_OK;
2808
2809         /* This loop should pass the first time.
2810          * Only loops here if the collection does not increase space.
2811          */
2812
2813         do {
2814                 max_tries++;
2815
2816                 checkpt_block_adjust = yaffs_calc_checkpt_blocks_required(dev);
2817
2818                 min_erased =
2819                     dev->param.n_reserved_blocks + checkpt_block_adjust + 1;
2820                 erased_chunks =
2821                     dev->n_erased_blocks * dev->param.chunks_per_block;
2822
2823                 /* If we need a block soon then do aggressive gc. */
2824                 if (dev->n_erased_blocks < min_erased)
2825                         aggressive = 1;
2826                 else {
2827                         if (!background
2828                             && erased_chunks > (dev->n_free_chunks / 4))
2829                                 break;
2830
2831                         if (dev->gc_skip > 20)
2832                                 dev->gc_skip = 20;
2833                         if (erased_chunks < dev->n_free_chunks / 2 ||
2834                             dev->gc_skip < 1 || background)
2835                                 aggressive = 0;
2836                         else {
2837                                 dev->gc_skip--;
2838                                 break;
2839                         }
2840                 }
2841
2842                 dev->gc_skip = 5;
2843
2844                 /* If we don't already have a block being gc'd then see if we
2845                  * should start another */
2846
2847                 if (dev->gc_block < 1 && !aggressive) {
2848                         dev->gc_block = yaffs2_find_refresh_block(dev);
2849                         dev->gc_chunk = 0;
2850                         dev->n_clean_ups = 0;
2851                 }
2852                 if (dev->gc_block < 1) {
2853                         dev->gc_block =
2854                             yaffs_find_gc_block(dev, aggressive, background);
2855                         dev->gc_chunk = 0;
2856                         dev->n_clean_ups = 0;
2857                 }
2858
2859                 if (dev->gc_block > 0) {
2860                         dev->all_gcs++;
2861                         if (!aggressive)
2862                                 dev->passive_gc_count++;
2863
2864                         yaffs_trace(YAFFS_TRACE_GC,
2865                                 "yaffs: GC n_erased_blocks %d aggressive %d",
2866                                 dev->n_erased_blocks, aggressive);
2867
2868                         gc_ok = yaffs_gc_block(dev, dev->gc_block, aggressive);
2869                 }
2870
2871                 if (dev->n_erased_blocks < (dev->param.n_reserved_blocks) &&
2872                     dev->gc_block > 0) {
2873                         yaffs_trace(YAFFS_TRACE_GC,
2874                                 "yaffs: GC !!!no reclaim!!! n_erased_blocks %d after try %d block %d",
2875                                 dev->n_erased_blocks, max_tries,
2876                                 dev->gc_block);
2877                 }
2878         } while ((dev->n_erased_blocks < dev->param.n_reserved_blocks) &&
2879                  (dev->gc_block > 0) && (max_tries < 2));
2880
2881         return aggressive ? gc_ok : YAFFS_OK;
2882 }
2883
2884 /*
2885  * yaffs_bg_gc()
2886  * Garbage collects. Intended to be called from a background thread.
2887  * Returns non-zero if at least half the free chunks are erased.
2888  */
2889 int yaffs_bg_gc(struct yaffs_dev *dev, unsigned urgency)
2890 {
2891         int erased_chunks = dev->n_erased_blocks * dev->param.chunks_per_block;
2892
2893         yaffs_trace(YAFFS_TRACE_BACKGROUND, "Background gc %u", urgency);
2894
2895         yaffs_check_gc(dev, 1);
2896         return erased_chunks > dev->n_free_chunks / 2;
2897 }
2898
2899 /*-------------------- Data file manipulation -----------------*/
2900
2901 static int yaffs_rd_data_obj(struct yaffs_obj *in, int inode_chunk, u8 * buffer)
2902 {
2903         int nand_chunk = yaffs_find_chunk_in_file(in, inode_chunk, NULL);
2904
2905         if (nand_chunk >= 0)
2906                 return yaffs_rd_chunk_tags_nand(in->my_dev, nand_chunk,
2907                                                 buffer, NULL);
2908         else {
2909                 yaffs_trace(YAFFS_TRACE_NANDACCESS,
2910                         "Chunk %d not found zero instead",
2911                         nand_chunk);
2912                 /* get sane (zero) data if you read a hole */
2913                 memset(buffer, 0, in->my_dev->data_bytes_per_chunk);
2914                 return 0;
2915         }
2916
2917 }
2918
2919 void yaffs_chunk_del(struct yaffs_dev *dev, int chunk_id, int mark_flash,
2920                      int lyn)
2921 {
2922         int block;
2923         int page;
2924         struct yaffs_ext_tags tags;
2925         struct yaffs_block_info *bi;
2926
2927         if (chunk_id <= 0)
2928                 return;
2929
2930         dev->n_deletions++;
2931         block = chunk_id / dev->param.chunks_per_block;
2932         page = chunk_id % dev->param.chunks_per_block;
2933
2934         if (!yaffs_check_chunk_bit(dev, block, page))
2935                 yaffs_trace(YAFFS_TRACE_VERIFY,
2936                         "Deleting invalid chunk %d", chunk_id);
2937
2938         bi = yaffs_get_block_info(dev, block);
2939
2940         yaffs2_update_oldest_dirty_seq(dev, block, bi);
2941
2942         yaffs_trace(YAFFS_TRACE_DELETION,
2943                 "line %d delete of chunk %d",
2944                 lyn, chunk_id);
2945
2946         if (!dev->param.is_yaffs2 && mark_flash &&
2947             bi->block_state != YAFFS_BLOCK_STATE_COLLECTING) {
2948
2949                 memset(&tags, 0, sizeof(tags));
2950                 tags.is_deleted = 1;
2951                 yaffs_wr_chunk_tags_nand(dev, chunk_id, NULL, &tags);
2952                 yaffs_handle_chunk_update(dev, chunk_id, &tags);
2953         } else {
2954                 dev->n_unmarked_deletions++;
2955         }
2956
2957         /* Pull out of the management area.
2958          * If the whole block became dirty, this will kick off an erasure.
2959          */
2960         if (bi->block_state == YAFFS_BLOCK_STATE_ALLOCATING ||
2961             bi->block_state == YAFFS_BLOCK_STATE_FULL ||
2962             bi->block_state == YAFFS_BLOCK_STATE_NEEDS_SCAN ||
2963             bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) {
2964                 dev->n_free_chunks++;
2965                 yaffs_clear_chunk_bit(dev, block, page);
2966                 bi->pages_in_use--;
2967
2968                 if (bi->pages_in_use == 0 &&
2969                     !bi->has_shrink_hdr &&
2970                     bi->block_state != YAFFS_BLOCK_STATE_ALLOCATING &&
2971                     bi->block_state != YAFFS_BLOCK_STATE_NEEDS_SCAN) {
2972                         yaffs_block_became_dirty(dev, block);
2973                 }
2974         }
2975 }
2976
2977 static int yaffs_wr_data_obj(struct yaffs_obj *in, int inode_chunk,
2978                              const u8 *buffer, int n_bytes, int use_reserve)
2979 {
2980         /* Find old chunk Need to do this to get serial number
2981          * Write new one and patch into tree.
2982          * Invalidate old tags.
2983          */
2984
2985         int prev_chunk_id;
2986         struct yaffs_ext_tags prev_tags;
2987         int new_chunk_id;
2988         struct yaffs_ext_tags new_tags;
2989         struct yaffs_dev *dev = in->my_dev;
2990
2991         yaffs_check_gc(dev, 0);
2992
2993         /* Get the previous chunk at this location in the file if it exists.
2994          * If it does not exist then put a zero into the tree. This creates
2995          * the tnode now, rather than later when it is harder to clean up.
2996          */
2997         prev_chunk_id = yaffs_find_chunk_in_file(in, inode_chunk, &prev_tags);
2998         if (prev_chunk_id < 1 &&
2999             !yaffs_put_chunk_in_file(in, inode_chunk, 0, 0))
3000                 return 0;
3001
3002         /* Set up new tags */
3003         memset(&new_tags, 0, sizeof(new_tags));
3004
3005         new_tags.chunk_id = inode_chunk;
3006         new_tags.obj_id = in->obj_id;
3007         new_tags.serial_number =
3008             (prev_chunk_id > 0) ? prev_tags.serial_number + 1 : 1;
3009         new_tags.n_bytes = n_bytes;
3010
3011         if (n_bytes < 1 || n_bytes > dev->param.total_bytes_per_chunk) {
3012                 yaffs_trace(YAFFS_TRACE_ERROR,
3013                   "Writing %d bytes to chunk!!!!!!!!!",
3014                    n_bytes);
3015                 BUG();
3016         }
3017
3018         new_chunk_id =
3019             yaffs_write_new_chunk(dev, buffer, &new_tags, use_reserve);
3020
3021         if (new_chunk_id > 0) {
3022                 yaffs_put_chunk_in_file(in, inode_chunk, new_chunk_id, 0);
3023
3024                 if (prev_chunk_id > 0)
3025                         yaffs_chunk_del(dev, prev_chunk_id, 1, __LINE__);
3026
3027                 yaffs_verify_file_sane(in);
3028         }
3029         return new_chunk_id;
3030
3031 }
3032
3033
3034
3035 static int yaffs_do_xattrib_mod(struct yaffs_obj *obj, int set,
3036                                 const YCHAR *name, const void *value, int size,
3037                                 int flags)
3038 {
3039         struct yaffs_xattr_mod xmod;
3040         int result;
3041
3042         xmod.set = set;
3043         xmod.name = name;
3044         xmod.data = value;
3045         xmod.size = size;
3046         xmod.flags = flags;
3047         xmod.result = -ENOSPC;
3048
3049         result = yaffs_update_oh(obj, NULL, 0, 0, 0, &xmod);
3050
3051         if (result > 0)
3052                 return xmod.result;
3053         else
3054                 return -ENOSPC;
3055 }
3056
3057 static int yaffs_apply_xattrib_mod(struct yaffs_obj *obj, char *buffer,
3058                                    struct yaffs_xattr_mod *xmod)
3059 {
3060         int retval = 0;
3061         int x_offs = sizeof(struct yaffs_obj_hdr);
3062         struct yaffs_dev *dev = obj->my_dev;
3063         int x_size = dev->data_bytes_per_chunk - sizeof(struct yaffs_obj_hdr);
3064         char *x_buffer = buffer + x_offs;
3065
3066         if (xmod->set)
3067                 retval =
3068                     nval_set(x_buffer, x_size, xmod->name, xmod->data,
3069                              xmod->size, xmod->flags);
3070         else
3071                 retval = nval_del(x_buffer, x_size, xmod->name);
3072
3073         obj->has_xattr = nval_hasvalues(x_buffer, x_size);
3074         obj->xattr_known = 1;
3075         xmod->result = retval;
3076
3077         return retval;
3078 }
3079
3080 static int yaffs_do_xattrib_fetch(struct yaffs_obj *obj, const YCHAR *name,
3081                                   void *value, int size)
3082 {
3083         char *buffer = NULL;
3084         int result;
3085         struct yaffs_ext_tags tags;
3086         struct yaffs_dev *dev = obj->my_dev;
3087         int x_offs = sizeof(struct yaffs_obj_hdr);
3088         int x_size = dev->data_bytes_per_chunk - sizeof(struct yaffs_obj_hdr);
3089         char *x_buffer;
3090         int retval = 0;
3091
3092         if (obj->hdr_chunk < 1)
3093                 return -ENODATA;
3094
3095         /* If we know that the object has no xattribs then don't do all the
3096          * reading and parsing.
3097          */
3098         if (obj->xattr_known && !obj->has_xattr) {
3099                 if (name)
3100                         return -ENODATA;
3101                 else
3102                         return 0;
3103         }
3104
3105         buffer = (char *)yaffs_get_temp_buffer(dev);
3106         if (!buffer)
3107                 return -ENOMEM;
3108
3109         result =
3110             yaffs_rd_chunk_tags_nand(dev, obj->hdr_chunk, (u8 *) buffer, &tags);
3111
3112         if (result != YAFFS_OK)
3113                 retval = -ENOENT;
3114         else {
3115                 x_buffer = buffer + x_offs;
3116
3117                 if (!obj->xattr_known) {
3118                         obj->has_xattr = nval_hasvalues(x_buffer, x_size);
3119                         obj->xattr_known = 1;
3120                 }
3121
3122                 if (name)
3123                         retval = nval_get(x_buffer, x_size, name, value, size);
3124                 else
3125                         retval = nval_list(x_buffer, x_size, value, size);
3126         }
3127         yaffs_release_temp_buffer(dev, (u8 *) buffer);
3128         return retval;
3129 }
3130
3131 int yaffs_set_xattrib(struct yaffs_obj *obj, const YCHAR * name,
3132                       const void *value, int size, int flags)
3133 {
3134         return yaffs_do_xattrib_mod(obj, 1, name, value, size, flags);
3135 }
3136
3137 int yaffs_remove_xattrib(struct yaffs_obj *obj, const YCHAR * name)
3138 {
3139         return yaffs_do_xattrib_mod(obj, 0, name, NULL, 0, 0);
3140 }
3141
3142 int yaffs_get_xattrib(struct yaffs_obj *obj, const YCHAR * name, void *value,
3143                       int size)
3144 {
3145         return yaffs_do_xattrib_fetch(obj, name, value, size);
3146 }
3147
3148 int yaffs_list_xattrib(struct yaffs_obj *obj, char *buffer, int size)
3149 {
3150         return yaffs_do_xattrib_fetch(obj, NULL, buffer, size);
3151 }
3152
3153 static void yaffs_check_obj_details_loaded(struct yaffs_obj *in)
3154 {
3155         u8 *buf;
3156         struct yaffs_obj_hdr *oh;
3157         struct yaffs_dev *dev;
3158         struct yaffs_ext_tags tags;
3159         int result;
3160         int alloc_failed = 0;
3161
3162         if (!in || !in->lazy_loaded || in->hdr_chunk < 1)
3163                 return;
3164
3165         dev = in->my_dev;
3166         in->lazy_loaded = 0;
3167         buf = yaffs_get_temp_buffer(dev);
3168
3169         result = yaffs_rd_chunk_tags_nand(dev, in->hdr_chunk, buf, &tags);
3170         oh = (struct yaffs_obj_hdr *)buf;
3171
3172         in->yst_mode = oh->yst_mode;
3173         yaffs_load_attribs(in, oh);
3174         yaffs_set_obj_name_from_oh(in, oh);
3175
3176         if (in->variant_type == YAFFS_OBJECT_TYPE_SYMLINK) {
3177                 in->variant.symlink_variant.alias =
3178                     yaffs_clone_str(oh->alias);
3179                 if (!in->variant.symlink_variant.alias)
3180                         alloc_failed = 1;       /* Not returned */
3181         }
3182         yaffs_release_temp_buffer(dev, buf);
3183 }
3184
3185 static void yaffs_load_name_from_oh(struct yaffs_dev *dev, YCHAR *name,
3186                                     const YCHAR *oh_name, int buff_size)
3187 {
3188 #ifdef CONFIG_YAFFS_AUTO_UNICODE
3189         if (dev->param.auto_unicode) {
3190                 if (*oh_name) {
3191                         /* It is an ASCII name, do an ASCII to
3192                          * unicode conversion */
3193                         const char *ascii_oh_name = (const char *)oh_name;
3194                         int n = buff_size - 1;
3195                         while (n > 0 && *ascii_oh_name) {
3196                                 *name = *ascii_oh_name;
3197                                 name++;
3198                                 ascii_oh_name++;
3199                                 n--;
3200                         }
3201                 } else {
3202                         strncpy(name, oh_name + 1, buff_size - 1);
3203                 }
3204         } else {
3205 #else
3206         (void) dev;
3207         {
3208 #endif
3209                 strncpy(name, oh_name, buff_size - 1);
3210         }
3211 }
3212
3213 static void yaffs_load_oh_from_name(struct yaffs_dev *dev, YCHAR *oh_name,
3214                                     const YCHAR *name)
3215 {
3216 #ifdef CONFIG_YAFFS_AUTO_UNICODE
3217
3218         int is_ascii;
3219         YCHAR *w;
3220
3221         if (dev->param.auto_unicode) {
3222
3223                 is_ascii = 1;
3224                 w = name;
3225
3226                 /* Figure out if the name will fit in ascii character set */
3227                 while (is_ascii && *w) {
3228                         if ((*w) & 0xff00)
3229                                 is_ascii = 0;
3230                         w++;
3231                 }
3232
3233                 if (is_ascii) {
3234                         /* It is an ASCII name, so convert unicode to ascii */
3235                         char *ascii_oh_name = (char *)oh_name;
3236                         int n = YAFFS_MAX_NAME_LENGTH - 1;
3237                         while (n > 0 && *name) {
3238                                 *ascii_oh_name = *name;
3239                                 name++;
3240                                 ascii_oh_name++;
3241                                 n--;
3242                         }
3243                 } else {
3244                         /* Unicode name, so save starting at the second YCHAR */
3245                         *oh_name = 0;
3246                         strncpy(oh_name + 1, name, YAFFS_MAX_NAME_LENGTH - 2);
3247                 }
3248         } else {
3249 #else
3250         dev = dev;
3251         {
3252 #endif
3253                 strncpy(oh_name, name, YAFFS_MAX_NAME_LENGTH - 1);
3254         }
3255 }
3256
3257 /* UpdateObjectHeader updates the header on NAND for an object.
3258  * If name is not NULL, then that new name is used.
3259  */
3260 int yaffs_update_oh(struct yaffs_obj *in, const YCHAR *name, int force,
3261                     int is_shrink, int shadows, struct yaffs_xattr_mod *xmod)
3262 {
3263
3264         struct yaffs_block_info *bi;
3265         struct yaffs_dev *dev = in->my_dev;
3266         int prev_chunk_id;
3267         int ret_val = 0;
3268         int result = 0;
3269         int new_chunk_id;
3270         struct yaffs_ext_tags new_tags;
3271         struct yaffs_ext_tags old_tags;
3272         const YCHAR *alias = NULL;
3273         u8 *buffer = NULL;
3274         YCHAR old_name[YAFFS_MAX_NAME_LENGTH + 1];
3275         struct yaffs_obj_hdr *oh = NULL;
3276         loff_t file_size = 0;
3277
3278         strcpy(old_name, _Y("silly old name"));
3279
3280         if (in->fake && in != dev->root_dir && !force && !xmod)
3281                 return ret_val;
3282
3283         yaffs_check_gc(dev, 0);
3284         yaffs_check_obj_details_loaded(in);
3285
3286         buffer = yaffs_get_temp_buffer(in->my_dev);
3287         oh = (struct yaffs_obj_hdr *)buffer;
3288
3289         prev_chunk_id = in->hdr_chunk;
3290
3291         if (prev_chunk_id > 0) {
3292                 result = yaffs_rd_chunk_tags_nand(dev, prev_chunk_id,
3293                                                   buffer, &old_tags);
3294
3295                 yaffs_verify_oh(in, oh, &old_tags, 0);
3296                 memcpy(old_name, oh->name, sizeof(oh->name));
3297                 memset(buffer, 0xff, sizeof(struct yaffs_obj_hdr));
3298         } else {
3299                 memset(buffer, 0xff, dev->data_bytes_per_chunk);
3300         }
3301
3302         oh->type = in->variant_type;
3303         oh->yst_mode = in->yst_mode;
3304         oh->shadows_obj = oh->inband_shadowed_obj_id = shadows;
3305
3306         yaffs_load_attribs_oh(oh, in);
3307
3308         if (in->parent)
3309                 oh->parent_obj_id = in->parent->obj_id;
3310         else
3311                 oh->parent_obj_id = 0;
3312
3313         if (name && *name) {
3314                 memset(oh->name, 0, sizeof(oh->name));
3315                 yaffs_load_oh_from_name(dev, oh->name, name);
3316         } else if (prev_chunk_id > 0) {
3317                 memcpy(oh->name, old_name, sizeof(oh->name));
3318         } else {
3319                 memset(oh->name, 0, sizeof(oh->name));
3320         }
3321
3322         oh->is_shrink = is_shrink;
3323
3324         switch (in->variant_type) {
3325         case YAFFS_OBJECT_TYPE_UNKNOWN:
3326                 /* Should not happen */
3327                 break;
3328         case YAFFS_OBJECT_TYPE_FILE:
3329                 if (oh->parent_obj_id != YAFFS_OBJECTID_DELETED &&
3330                     oh->parent_obj_id != YAFFS_OBJECTID_UNLINKED)
3331                         file_size = in->variant.file_variant.file_size;
3332                 yaffs_oh_size_load(oh, file_size);
3333                 break;
3334         case YAFFS_OBJECT_TYPE_HARDLINK:
3335                 oh->equiv_id = in->variant.hardlink_variant.equiv_id;
3336                 break;
3337         case YAFFS_OBJECT_TYPE_SPECIAL:
3338                 /* Do nothing */
3339                 break;
3340         case YAFFS_OBJECT_TYPE_DIRECTORY:
3341                 /* Do nothing */
3342                 break;
3343         case YAFFS_OBJECT_TYPE_SYMLINK:
3344                 alias = in->variant.symlink_variant.alias;
3345                 if (!alias)
3346                         alias = _Y("no alias");
3347                 strncpy(oh->alias, alias, YAFFS_MAX_ALIAS_LENGTH);
3348                 oh->alias[YAFFS_MAX_ALIAS_LENGTH] = 0;
3349                 break;
3350         }
3351
3352         /* process any xattrib modifications */
3353         if (xmod)
3354                 yaffs_apply_xattrib_mod(in, (char *)buffer, xmod);
3355
3356         /* Tags */
3357         memset(&new_tags, 0, sizeof(new_tags));
3358         in->serial++;
3359         new_tags.chunk_id = 0;
3360         new_tags.obj_id = in->obj_id;
3361         new_tags.serial_number = in->serial;
3362
3363         /* Add extra info for file header */
3364         new_tags.extra_available = 1;
3365         new_tags.extra_parent_id = oh->parent_obj_id;
3366         new_tags.extra_file_size = file_size;
3367         new_tags.extra_is_shrink = oh->is_shrink;
3368         new_tags.extra_equiv_id = oh->equiv_id;
3369         new_tags.extra_shadows = (oh->shadows_obj > 0) ? 1 : 0;
3370         new_tags.extra_obj_type = in->variant_type;
3371         yaffs_verify_oh(in, oh, &new_tags, 1);
3372
3373         /* Create new chunk in NAND */
3374         new_chunk_id =
3375             yaffs_write_new_chunk(dev, buffer, &new_tags,
3376                                   (prev_chunk_id > 0) ? 1 : 0);
3377
3378         if (buffer)
3379                 yaffs_release_temp_buffer(dev, buffer);
3380
3381         if (new_chunk_id < 0)
3382                 return new_chunk_id;
3383
3384         in->hdr_chunk = new_chunk_id;
3385
3386         if (prev_chunk_id > 0)
3387                 yaffs_chunk_del(dev, prev_chunk_id, 1, __LINE__);
3388
3389         if (!yaffs_obj_cache_dirty(in))
3390                 in->dirty = 0;
3391
3392         /* If this was a shrink, then mark the block
3393          * that the chunk lives on */
3394         if (is_shrink) {
3395                 bi = yaffs_get_block_info(in->my_dev,
3396                                           new_chunk_id /
3397                                           in->my_dev->param.chunks_per_block);
3398                 bi->has_shrink_hdr = 1;
3399         }
3400
3401
3402         return new_chunk_id;
3403 }
3404
3405 /*--------------------- File read/write ------------------------
3406  * Read and write have very similar structures.
3407  * In general the read/write has three parts to it
3408  * An incomplete chunk to start with (if the read/write is not chunk-aligned)
3409  * Some complete chunks
3410  * An incomplete chunk to end off with
3411  *
3412  * Curve-balls: the first chunk might also be the last chunk.
3413  */
3414
3415 int yaffs_file_rd(struct yaffs_obj *in, u8 * buffer, loff_t offset, int n_bytes)
3416 {
3417         int chunk;
3418         u32 start;
3419         int n_copy;
3420         int n = n_bytes;
3421         int n_done = 0;<