2 * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
4 * Copyright (C) 2002-2018 Aleph One Ltd.
6 * Created by Charles Manning <charles@aleph1.co.uk>
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
14 #include "yaffs_trace.h"
16 #include "yaffs_guts.h"
18 #include "yaffs_cache.h"
19 #include "yaffs_endian.h"
20 #include "yaffs_getblockinfo.h"
21 #include "yaffs_tagscompat.h"
22 #include "yaffs_tagsmarshall.h"
23 #include "yaffs_nand.h"
24 #include "yaffs_yaffs1.h"
25 #include "yaffs_yaffs2.h"
26 #include "yaffs_bitmap.h"
27 #include "yaffs_verify.h"
28 #include "yaffs_nand.h"
29 #include "yaffs_packedtags2.h"
30 #include "yaffs_nameval.h"
31 #include "yaffs_allocator.h"
32 #include "yaffs_attribs.h"
33 #include "yaffs_summary.h"
35 /* Note YAFFS_GC_GOOD_ENOUGH must be <= YAFFS_GC_PASSIVE_THRESHOLD */
36 #define YAFFS_GC_GOOD_ENOUGH 2
37 #define YAFFS_GC_PASSIVE_THRESHOLD 4
39 #include "yaffs_ecc.h"
41 /* Forward declarations */
43 static void yaffs_fix_null_name(struct yaffs_obj *obj, YCHAR *name,
46 /* Function to calculate chunk and offset */
/* Translate a flash byte address into a chunk index and a byte offset
 * within that chunk.  Power-of-2 chunk sizes use the precomputed
 * shift/mask; other sizes fall back to a divide and multiply-back.
 * NOTE(review): this extract is incomplete - local declarations, braces
 * and the else keyword are missing from the visible fragment.
 */
48 void yaffs_addr_to_chunk(struct yaffs_dev *dev, loff_t addr,
49 int *chunk_out, u32 *offset_out)
54 chunk = (u32) (addr >> dev->chunk_shift);
56 if (dev->chunk_div == 1) {
57 /* easy power of 2 case */
58 offset = (u32) (addr & dev->chunk_mask);
60 /* Non power-of-2 case */
64 chunk /= dev->chunk_div;
66 chunk_base = ((loff_t) chunk) * dev->data_bytes_per_chunk;
67 offset = (u32) (addr - chunk_base);
74 /* Function to return the number of shifts for a power of 2 greater than or
75 * equal to the given number
76 * Note we don't try to cater for all possible numbers and this does not have to
77 * be hellishly efficient.
/* Return the number of shifts for the smallest power of 2 >= x.
 * NOTE(review): the loop body is missing from this fragment; only the
 * initialisation of the two counters is visible.
 */
80 static inline u32 calc_shifts_ceiling(u32 x)
85 shifts = extra_bits = 0;
100 /* Function to return the number of shifts to get a 1 in bit 0
103 static inline u32 calc_shifts(u32 x)
121 * Temporary buffer manipulations.
/* Allocate the pool of YAFFS_N_TEMP_BUFFERS temporary chunk buffers.
 * The loop condition tests buf, so allocation stops early after the
 * first kmalloc failure; the return value reflects the last attempt.
 * NOTE(review): declarations of i/buf are missing from this fragment.
 */
124 static int yaffs_init_tmp_buffers(struct yaffs_dev *dev)
129 memset(dev->temp_buffer, 0, sizeof(dev->temp_buffer));
131 for (i = 0; buf && i < YAFFS_N_TEMP_BUFFERS; i++) {
132 dev->temp_buffer[i].in_use = 0;
/* One whole chunk (data + spare) per buffer; GFP_NOFS avoids fs recursion */
133 buf = kmalloc(dev->param.total_bytes_per_chunk, GFP_NOFS);
134 dev->temp_buffer[i].buffer = buf;
137 return buf ? YAFFS_OK : YAFFS_FAIL;
/* Hand out a temporary chunk buffer.  Prefers a free slot from the
 * managed pool; if all are busy, falls back to an "unmanaged" kmalloc
 * and counts it so leaks can be spotted (see the dealloc counter in
 * yaffs_release_temp_buffer).
 */
140 u8 *yaffs_get_temp_buffer(struct yaffs_dev * dev)
/* Track the high-water mark of simultaneously used buffers */
145 if (dev->temp_in_use > dev->max_temp)
146 dev->max_temp = dev->temp_in_use;
148 for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
149 if (dev->temp_buffer[i].in_use == 0) {
150 dev->temp_buffer[i].in_use = 1;
151 return dev->temp_buffer[i].buffer;
155 yaffs_trace(YAFFS_TRACE_BUFFERS, "Out of temp buffers");
157 * If we got here then we have to allocate an unmanaged one
161 dev->unmanaged_buffer_allocs++;
162 return kmalloc(dev->data_bytes_per_chunk, GFP_NOFS);
/* Return a buffer obtained from yaffs_get_temp_buffer.  If it matches a
 * managed pool slot, just clear in_use; otherwise it is assumed to be an
 * unmanaged kmalloc'd buffer.  NOTE(review): the early return after the
 * match and the kfree of the unmanaged buffer are not visible in this
 * fragment - presumably present in the full source.
 */
166 void yaffs_release_temp_buffer(struct yaffs_dev *dev, u8 *buffer)
172 for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
173 if (dev->temp_buffer[i].buffer == buffer) {
174 dev->temp_buffer[i].in_use = 0;
180 /* assume it is an unmanaged one. */
181 yaffs_trace(YAFFS_TRACE_BUFFERS,
182 "Releasing unmanaged temp buffer");
184 dev->unmanaged_buffer_deallocs++;
190 * Functions for robustisizing TODO
/* Hook called after a successful chunk write; body not visible in this
 * fragment (appears to be an empty robustness placeholder).
 */
194 static void yaffs_handle_chunk_wr_ok(struct yaffs_dev *dev, int nand_chunk,
196 const struct yaffs_ext_tags *tags)
/* Hook called when a chunk is updated; body not visible in this fragment
 * (appears to be an empty robustness placeholder).
 */
204 static void yaffs_handle_chunk_update(struct yaffs_dev *dev, int nand_chunk,
205 const struct yaffs_ext_tags *tags)
/* Record a chunk error against a block: prioritise the block for garbage
 * collection and count strikes; after more than 3 strikes the block is
 * marked for retirement.  NOTE(review): in this fragment the strike
 * counter increment appears inside the !gc_prioritise branch - confirm
 * against the full source before relying on that placement.
 */
212 void yaffs_handle_chunk_error(struct yaffs_dev *dev,
213 struct yaffs_block_info *bi)
215 if (!bi->gc_prioritise) {
216 bi->gc_prioritise = 1;
217 dev->has_pending_prioritised_gc = 1;
218 bi->chunk_error_strikes++;
220 if (bi->chunk_error_strikes > 3) {
221 bi->needs_retiring = 1; /* Too many stikes, so retire */
222 yaffs_trace(YAFFS_TRACE_ALWAYS,
223 "yaffs: Block struck out");
/* Handle a failed chunk write: strike the owning block, mark it for
 * retirement on a genuine write failure, delete the bad chunk and skip
 * the rest of the current allocation block.
 */
229 static void yaffs_handle_chunk_wr_error(struct yaffs_dev *dev, int nand_chunk,
232 int flash_block = nand_chunk / dev->param.chunks_per_block;
233 struct yaffs_block_info *bi = yaffs_get_block_info(dev, flash_block);
235 yaffs_handle_chunk_error(dev, bi);
238 /* Was an actual write failure,
239 * so mark the block for retirement.*/
240 bi->needs_retiring = 1;
241 yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
242 "**>> Block %d needs retiring", flash_block);
245 /* Delete the chunk */
246 yaffs_chunk_del(dev, nand_chunk, 1, __LINE__);
247 yaffs_skip_rest_of_block(dev);
255 * Simple hash function. Needs to have a reasonable spread
/* Map an object id onto one of the YAFFS_NOBJECT_BUCKETS hash buckets.
 * NOTE(review): handling of negative n (if any) is not visible in this
 * fragment; n % BUCKETS is negative for negative n in C.
 */
258 static inline int yaffs_hash_fn(int n)
262 return n % YAFFS_NOBJECT_BUCKETS;
266 * Access functions to useful fake objects.
267 * Note that root might have a presence in NAND if permissions are set.
/* Accessor for the device's root directory fake object. */
270 struct yaffs_obj *yaffs_root(struct yaffs_dev *dev)
272 return dev->root_dir;
/* Accessor for the device's lost+found fake directory object. */
275 struct yaffs_obj *yaffs_lost_n_found(struct yaffs_dev *dev)
277 return dev->lost_n_found;
281 * Erased NAND checking functions
/* Check that n_bytes of buffer are all 0xFF (erased flash state).
 * NOTE(review): loop body not visible in this fragment.
 */
284 int yaffs_check_ff(u8 *buffer, int n_bytes)
286 /* Horrible, slow implementation */
/* Verify that a NAND chunk is fully erased: read it back with tags and
 * confirm the data area is all 0xFF and the read/ECC succeeded.
 * Returns YAFFS_OK/YAFFS_FAIL via retval; uses a temp buffer which is
 * always released.  NOTE(review): the retval assignments in the failure
 * branches are missing from this fragment.
 */
295 static int yaffs_check_chunk_erased(struct yaffs_dev *dev, int nand_chunk)
297 int retval = YAFFS_OK;
298 u8 *data = yaffs_get_temp_buffer(dev);
299 struct yaffs_ext_tags tags;
302 result = yaffs_rd_chunk_tags_nand(dev, nand_chunk, data, &tags);
304 if (result == YAFFS_FAIL ||
305 tags.ecc_result > YAFFS_ECC_RESULT_NO_ERROR)
308 if (!yaffs_check_ff(data, dev->data_bytes_per_chunk) ||
310 yaffs_trace(YAFFS_TRACE_NANDACCESS,
311 "Chunk %d not erased", nand_chunk);
315 yaffs_release_temp_buffer(dev, data);
/* Read back a freshly written chunk and compare data plus the key tag
 * fields (obj_id, chunk_id, n_bytes) against what was written.
 * NOTE(review): the retval assignment on mismatch is missing from this
 * fragment.
 */
321 static int yaffs_verify_chunk_written(struct yaffs_dev *dev,
324 struct yaffs_ext_tags *tags)
326 int retval = YAFFS_OK;
327 struct yaffs_ext_tags temp_tags;
328 u8 *buffer = yaffs_get_temp_buffer(dev);
331 result = yaffs_rd_chunk_tags_nand(dev, nand_chunk, buffer, &temp_tags);
332 if (result == YAFFS_FAIL ||
333 memcmp(buffer, data, dev->data_bytes_per_chunk) ||
334 temp_tags.obj_id != tags->obj_id ||
335 temp_tags.chunk_id != tags->chunk_id ||
336 temp_tags.n_bytes != tags->n_bytes)
339 yaffs_release_temp_buffer(dev, buffer);
/* Return non-zero if n_chunks can be allocated without eating into the
 * reserved blocks or the space needed for checkpointing.
 */
345 int yaffs_check_alloc_available(struct yaffs_dev *dev, int n_chunks)
348 int reserved_blocks = dev->param.n_reserved_blocks;
351 checkpt_blocks = yaffs_calc_checkpt_blocks_required(dev);
354 (reserved_blocks + checkpt_blocks) * dev->param.chunks_per_block;
356 return (dev->n_free_chunks > (reserved_chunks + n_chunks));
/* Find the next empty block to allocate from, scanning circularly with
 * dev->alloc_block_finder.  On success the block is moved to the
 * ALLOCATING state, given the current sequence number, and its index is
 * returned.  If no erased block exists (which should not happen given
 * n_erased_blocks) a tragedy is traced.  NOTE(review): the failure
 * return value is not visible in this fragment.
 */
359 static int yaffs_find_alloc_block(struct yaffs_dev *dev)
362 struct yaffs_block_info *bi;
364 if (dev->n_erased_blocks < 1) {
365 /* Hoosterman we've got a problem.
366 * Can't get space to gc
368 yaffs_trace(YAFFS_TRACE_ERROR,
369 "yaffs tragedy: no more erased blocks");
374 /* Find an empty block. */
376 for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
377 dev->alloc_block_finder++;
/* Wrap the finder back into the valid internal block range */
378 if (dev->alloc_block_finder < (int)dev->internal_start_block
379 || dev->alloc_block_finder > (int)dev->internal_end_block) {
380 dev->alloc_block_finder = dev->internal_start_block;
383 bi = yaffs_get_block_info(dev, dev->alloc_block_finder);
385 if (bi->block_state == YAFFS_BLOCK_STATE_EMPTY) {
386 bi->block_state = YAFFS_BLOCK_STATE_ALLOCATING;
388 bi->seq_number = dev->seq_number;
389 dev->n_erased_blocks--;
390 yaffs_trace(YAFFS_TRACE_ALLOCATE,
391 "Allocated block %d, seq %d, %d left" ,
392 dev->alloc_block_finder, dev->seq_number,
393 dev->n_erased_blocks);
394 return dev->alloc_block_finder;
398 yaffs_trace(YAFFS_TRACE_ALWAYS,
399 "yaffs tragedy: no more erased blocks, but there should have been %d",
400 dev->n_erased_blocks);
/* Allocate the next free NAND chunk from the current allocation block,
 * finding a new block first if needed.  use_reserver permits dipping
 * into the reserved space (e.g. for gc).  Marks the chunk bit, updates
 * free-chunk accounting, and closes the block (state FULL) once its last
 * page is handed out.  NOTE(review): the alloc_page increment, the
 * *block_ptr assignment and the failure return are not visible in this
 * fragment.
 */
405 static int yaffs_alloc_chunk(struct yaffs_dev *dev, int use_reserver,
406 struct yaffs_block_info **block_ptr)
409 struct yaffs_block_info *bi;
411 if (dev->alloc_block < 0) {
412 /* Get next block to allocate off */
413 dev->alloc_block = yaffs_find_alloc_block(dev);
417 if (!use_reserver && !yaffs_check_alloc_available(dev, 1)) {
418 /* No space unless we're allowed to use the reserve. */
422 if (dev->n_erased_blocks < (int)dev->param.n_reserved_blocks
423 && dev->alloc_page == 0)
424 yaffs_trace(YAFFS_TRACE_ALLOCATE, "Allocating reserve");
426 /* Next page please.... */
427 if (dev->alloc_block >= 0) {
428 bi = yaffs_get_block_info(dev, dev->alloc_block);
430 ret_val = (dev->alloc_block * dev->param.chunks_per_block) +
433 yaffs_set_chunk_bit(dev, dev->alloc_block, dev->alloc_page);
437 dev->n_free_chunks--;
439 /* If the block is full set the state to full */
440 if (dev->alloc_page >= dev->param.chunks_per_block) {
441 bi->block_state = YAFFS_BLOCK_STATE_FULL;
442 dev->alloc_block = -1;
451 yaffs_trace(YAFFS_TRACE_ERROR,
452 "!!!!!!!!! Allocator out !!!!!!!!!!!!!!!!!");
/* Count erased chunks: whole erased blocks plus what remains of the
 * current allocation block.
 */
457 static int yaffs_get_erased_chunks(struct yaffs_dev *dev)
461 n = dev->n_erased_blocks * dev->param.chunks_per_block;
463 if (dev->alloc_block > 0)
464 n += (dev->param.chunks_per_block - dev->alloc_page);
471 * yaffs_skip_rest_of_block() skips over the rest of the allocation block
472 * if we don't want to write to it.
/* Abandon the rest of the current allocation block: mark it FULL and
 * force a fresh block to be chosen on the next allocation.
 */
474 void yaffs_skip_rest_of_block(struct yaffs_dev *dev)
476 struct yaffs_block_info *bi;
478 if (dev->alloc_block > 0) {
479 bi = yaffs_get_block_info(dev, dev->alloc_block);
480 if (bi->block_state == YAFFS_BLOCK_STATE_ALLOCATING) {
481 bi->block_state = YAFFS_BLOCK_STATE_FULL;
482 dev->alloc_block = -1;
/* Allocate a chunk and write data+tags to it, retrying on failure.
 * Policy: optionally check the chunk is erased first (skip_erased_check
 * caches a passing check per block), verify the write unless skipping,
 * and on any failure delete the chunk, skip the rest of the block and
 * retry up to yaffs_wr_attempts times (0 = unlimited).  Invalidates the
 * checkpoint since the write changes NAND state.  NOTE(review): the
 * do-loop opening, attempts counter updates and final return are not
 * visible in this fragment.
 */
487 static int yaffs_write_new_chunk(struct yaffs_dev *dev,
489 struct yaffs_ext_tags *tags, int use_reserver)
495 yaffs2_checkpt_invalidate(dev);
498 struct yaffs_block_info *bi = 0;
501 chunk = yaffs_alloc_chunk(dev, use_reserver, &bi);
507 /* First check this chunk is erased, if it needs
508 * checking. The checking policy (unless forced
509 * always on) is as follows:
511 * Check the first page we try to write in a block.
512 * If the check passes then we don't need to check any
513 * more. If the check fails, we check again...
514 * If the block has been erased, we don't need to check.
516 * However, if the block has been prioritised for gc,
517 * then we think there might be something odd about
518 * this block and stop using it.
520 * Rationale: We should only ever see chunks that have
521 * not been erased if there was a partially written
522 * chunk due to power loss. This checking policy should
523 * catch that case with very few checks and thus save a
524 * lot of checks that are most likely not needed.
527 * If an erase check fails or the write fails we skip the
531 /* let's give it a try */
534 if (dev->param.always_check_erased)
535 bi->skip_erased_check = 0;
537 if (!bi->skip_erased_check) {
538 erased_ok = yaffs_check_chunk_erased(dev, chunk);
539 if (erased_ok != YAFFS_OK) {
540 yaffs_trace(YAFFS_TRACE_ERROR,
541 "**>> yaffs chunk %d was not erased",
544 /* If not erased, delete this one,
545 * skip rest of block and
546 * try another chunk */
547 yaffs_chunk_del(dev, chunk, 1, __LINE__);
548 yaffs_skip_rest_of_block(dev);
553 write_ok = yaffs_wr_chunk_tags_nand(dev, chunk, data, tags);
555 if (!bi->skip_erased_check)
557 yaffs_verify_chunk_written(dev, chunk, data, tags);
559 if (write_ok != YAFFS_OK) {
560 /* Clean up aborted write, skip to next block and
561 * try another chunk */
562 yaffs_handle_chunk_wr_error(dev, chunk, erased_ok);
/* Block proven good once: skip erase checks for subsequent pages */
566 bi->skip_erased_check = 1;
568 /* Copy the data into the robustification buffer */
569 yaffs_handle_chunk_wr_ok(dev, chunk, data, tags);
571 } while (write_ok != YAFFS_OK &&
572 (yaffs_wr_attempts == 0 || attempts <= yaffs_wr_attempts));
578 yaffs_trace(YAFFS_TRACE_ERROR,
579 "**>> yaffs write required %d attempts",
581 dev->n_retried_writes += (attempts - 1);
588 * Block retiring for handling a broken block.
/* Retire a broken block: invalidate the checkpoint, try to mark the
 * block bad; if marking fails, erase it and write a bad-block marker
 * chunk (seq_number = YAFFS_SEQUENCE_BAD_BLOCK) by hand.  Finally mark
 * the block DEAD and update the retired-block count.
 */
591 static void yaffs_retire_block(struct yaffs_dev *dev, int flash_block)
593 struct yaffs_block_info *bi = yaffs_get_block_info(dev, flash_block);
595 yaffs2_checkpt_invalidate(dev);
597 yaffs2_clear_oldest_dirty_seq(dev, bi);
599 if (yaffs_mark_bad(dev, flash_block) != YAFFS_OK) {
600 if (yaffs_erase_block(dev, flash_block) != YAFFS_OK) {
601 yaffs_trace(YAFFS_TRACE_ALWAYS,
602 "yaffs: Failed to mark bad and erase block %d",
605 struct yaffs_ext_tags tags;
607 flash_block * dev->param.chunks_per_block;
609 u8 *buffer = yaffs_get_temp_buffer(dev);
611 memset(buffer, 0xff, dev->data_bytes_per_chunk);
612 memset(&tags, 0, sizeof(tags));
/* Special sequence number flags this block as bad on future scans */
613 tags.seq_number = YAFFS_SEQUENCE_BAD_BLOCK;
614 if (dev->tagger.write_chunk_tags_fn(dev, chunk_id -
618 yaffs_trace(YAFFS_TRACE_ALWAYS,
619 "yaffs: Failed to write bad block marker to block %d",
622 yaffs_release_temp_buffer(dev, buffer);
626 bi->block_state = YAFFS_BLOCK_STATE_DEAD;
627 bi->gc_prioritise = 0;
628 bi->needs_retiring = 0;
630 dev->n_retired_blocks++;
633 /*---------------- Name handling functions ------------*/
/* Copy a name out of an on-NAND object header into name[].  With
 * auto-unicode enabled, an ASCII-flagged stored name is widened
 * byte-by-byte to YCHAR; a unicode name is stored starting at the
 * second YCHAR.  Without auto-unicode the name is copied directly.
 * NOTE(review): strncpy here does not guarantee NUL-termination;
 * presumably the callers pre-zero the destination - confirm.
 */
635 static void yaffs_load_name_from_oh(struct yaffs_dev *dev, YCHAR *name,
636 const YCHAR *oh_name, int buff_size)
638 #ifdef CONFIG_YAFFS_AUTO_UNICODE
639 if (dev->param.auto_unicode) {
641 /* It is an ASCII name, do an ASCII to
642 * unicode conversion */
643 const char *ascii_oh_name = (const char *)oh_name;
644 int n = buff_size - 1;
645 while (n > 0 && *ascii_oh_name) {
646 *name = *ascii_oh_name;
652 strncpy(name, oh_name + 1, buff_size - 1);
659 strncpy(name, oh_name, buff_size - 1);
/* Store a name into an object header buffer, the inverse of
 * yaffs_load_name_from_oh.  With auto-unicode, names that fit in ASCII
 * are narrowed to chars; otherwise the unicode name is stored from the
 * second YCHAR (the first presumably acts as the ASCII/unicode flag -
 * not visible here, confirm against full source).
 */
663 static void yaffs_load_oh_from_name(struct yaffs_dev *dev, YCHAR *oh_name,
666 #ifdef CONFIG_YAFFS_AUTO_UNICODE
671 if (dev->param.auto_unicode) {
676 /* Figure out if the name will fit in ascii character set */
677 while (is_ascii && *w) {
684 /* It is an ASCII name, so convert unicode to ascii */
685 char *ascii_oh_name = (char *)oh_name;
686 int n = YAFFS_MAX_NAME_LENGTH - 1;
687 while (n > 0 && *name) {
688 *ascii_oh_name = *name;
694 /* Unicode name, so save starting at the second YCHAR */
696 strncpy(oh_name + 1, name, YAFFS_MAX_NAME_LENGTH - 2);
703 strncpy(oh_name, name, YAFFS_MAX_NAME_LENGTH - 1);
/* Compute a 16-bit checksum of a name for fast directory lookups.
 * The 0x1f mask makes the sum case-insensitive for ASCII letters.
 */
707 static u16 yaffs_calc_name_sum(const YCHAR *name)
715 while ((*name) && i < (YAFFS_MAX_NAME_LENGTH / 2)) {
717 /* 0x1f mask is case insensitive */
718 sum += ((*name) & 0x1f) * i;
/* Set an object's name: empty names are replaced by a fixed-up name,
 * names short enough are cached in obj->short_name, and the name sum is
 * always recomputed for lookup.  NOTE(review): the else-if keyword
 * joining the branches is missing from this fragment.
 */
726 void yaffs_set_obj_name(struct yaffs_obj *obj, const YCHAR * name)
728 memset(obj->short_name, 0, sizeof(obj->short_name));
730 if (name && !name[0]) {
731 yaffs_fix_null_name(obj, obj->short_name,
732 YAFFS_SHORT_NAME_LENGTH);
733 name = obj->short_name;
735 strnlen(name, YAFFS_SHORT_NAME_LENGTH + 1) <=
736 YAFFS_SHORT_NAME_LENGTH) {
737 strcpy(obj->short_name, name);
740 obj->sum = yaffs_calc_name_sum(name);
/* Set an object's name from an on-NAND object header, applying the
 * auto-unicode decode when configured; otherwise uses oh->name directly.
 */
743 void yaffs_set_obj_name_from_oh(struct yaffs_obj *obj,
744 const struct yaffs_obj_hdr *oh)
746 #ifdef CONFIG_YAFFS_AUTO_UNICODE
747 YCHAR tmp_name[YAFFS_MAX_NAME_LENGTH + 1];
748 memset(tmp_name, 0, sizeof(tmp_name));
749 yaffs_load_name_from_oh(obj->my_dev, tmp_name, oh->name,
750 YAFFS_MAX_NAME_LENGTH + 1);
751 yaffs_set_obj_name(obj, tmp_name);
753 yaffs_set_obj_name(obj, oh->name);
/* Maximum file size this device supports: capped at the 32-bit limit
 * when loff_t is narrower than 8 bytes, else bounded by the largest
 * addressable chunk id times the chunk size.
 */
757 loff_t yaffs_max_file_size(struct yaffs_dev *dev)
759 if (sizeof(loff_t) < 8)
760 return YAFFS_MAX_FILE_SIZE_32;
762 return ((loff_t) YAFFS_MAX_CHUNK_ID) * dev->data_bytes_per_chunk;
765 /*-------------------- TNODES -------------------
767 * List of spare tnodes
768 * The list is hooked together using the first pointer
/* Allocate a zeroed tnode from the raw allocator.  Resets the cached
 * checkpoint size estimate since the tree has grown.
 */
772 struct yaffs_tnode *yaffs_get_tnode(struct yaffs_dev *dev)
774 struct yaffs_tnode *tn = yaffs_alloc_raw_tnode(dev);
777 memset(tn, 0, dev->tnode_size);
781 dev->checkpoint_blocks_required = 0; /* force recalculation */
786 /* FreeTnode frees up a tnode and puts it back on the free list */
/* FreeTnode frees up a tnode and puts it back on the free list */
787 static void yaffs_free_tnode(struct yaffs_dev *dev, struct yaffs_tnode *tn)
789 yaffs_free_raw_tnode(dev, tn);
/* Tree shrank, so the checkpoint size estimate is stale */
791 dev->checkpoint_blocks_required = 0; /* force recalculation */
/* Tear down the raw tnode and object allocators for this device. */
794 static void yaffs_deinit_tnodes_and_objs(struct yaffs_dev *dev)
796 yaffs_deinit_raw_tnodes_and_objs(dev);
/* Store val into level-0 tnode slot pos.  Slots are tnode_width bits
 * wide and packed into the tnode's u32 words, so a value may straddle a
 * word boundary; the second half is written into the next word.  val is
 * pre-shifted down by chunk_grp_bits (group base, not exact chunk).
 */
801 static void yaffs_load_tnode_0(struct yaffs_dev *dev, struct yaffs_tnode *tn,
802 unsigned pos, unsigned val)
804 u32 *map = (u32 *) tn;
810 pos &= YAFFS_TNODES_LEVEL0_MASK;
811 val >>= dev->chunk_grp_bits;
813 bit_in_map = pos * dev->tnode_width;
814 word_in_map = bit_in_map / 32;
815 bit_in_word = bit_in_map & (32 - 1);
817 mask = dev->tnode_mask << bit_in_word;
819 map[word_in_map] &= ~mask;
820 map[word_in_map] |= (mask & (val << bit_in_word));
/* Slot straddles a 32-bit word boundary: write the high part too */
822 if (dev->tnode_width > (32 - bit_in_word)) {
823 bit_in_word = (32 - bit_in_word);
826 dev->tnode_mask >> bit_in_word;
827 map[word_in_map] &= ~mask;
828 map[word_in_map] |= (mask & (val >> bit_in_word));
/* Read the packed tnode_width-bit value at level-0 slot pos and scale it
 * back up by chunk_grp_bits, yielding the group base chunk number.
 * Mirror of yaffs_load_tnode_0, including the word-straddle case.
 */
832 u32 yaffs_get_group_base(struct yaffs_dev *dev, struct yaffs_tnode *tn,
835 u32 *map = (u32 *) tn;
841 pos &= YAFFS_TNODES_LEVEL0_MASK;
843 bit_in_map = pos * dev->tnode_width;
844 word_in_map = bit_in_map / 32;
845 bit_in_word = bit_in_map & (32 - 1);
847 val = map[word_in_map] >> bit_in_word;
849 if (dev->tnode_width > (32 - bit_in_word)) {
850 bit_in_word = (32 - bit_in_word);
852 val |= (map[word_in_map] << bit_in_word);
855 val &= dev->tnode_mask;
856 val <<= dev->chunk_grp_bits;
861 /* ------------------- End of individual tnode manipulation -----------------*/
863 /* ---------Functions to manipulate the look-up tree (made up of tnodes) ------
864 * The look up tree is represented by the top tnode and the number of top_level
865 * in the tree. 0 means only the level 0 tnode is in the tree.
868 /* FindLevel0Tnode finds the level 0 tnode, if one exists. */
/* Find the level-0 tnode covering chunk_id, or NULL if the tree is not
 * tall enough or the id is out of range.  Computes the depth required
 * for chunk_id, rejects trees shorter than that, then walks internal
 * tnodes down to level 0 selecting the child by the relevant bit slice
 * of chunk_id at each level.
 */
869 struct yaffs_tnode *yaffs_find_tnode_0(struct yaffs_dev *dev,
870 struct yaffs_file_var *file_struct,
873 struct yaffs_tnode *tn = file_struct->top;
876 int level = file_struct->top_level;
880 /* Check sane level and chunk Id */
881 if (level < 0 || level > YAFFS_TNODES_MAX_LEVEL)
884 if (chunk_id > YAFFS_MAX_CHUNK_ID)
887 /* First check we're tall enough (ie enough top_level) */
889 i = chunk_id >> YAFFS_TNODES_LEVEL0_BITS;
892 i >>= YAFFS_TNODES_INTERNAL_BITS;
896 if (required_depth > file_struct->top_level)
897 return NULL; /* Not tall enough, so we can't find it */
899 /* Traverse down to level 0 */
900 while (level > 0 && tn) {
901 tn = tn->internal[(chunk_id >>
902 (YAFFS_TNODES_LEVEL0_BITS +
904 YAFFS_TNODES_INTERNAL_BITS)) &
905 YAFFS_TNODES_INTERNAL_MASK];
912 /* add_find_tnode_0 finds the level 0 tnode if it exists,
913 * otherwise first expands the tree.
914 * This happens in two steps:
915 * 1. If the tree isn't tall enough, then make it taller.
916 * 2. Scan down the tree towards the level 0 tnode adding tnodes if required.
918 * Used when modifying the tree.
920 * If the tn argument is NULL, then a fresh tnode will be added otherwise the
921 * specified tn will be plugged into the ttree.
/* Find the level-0 tnode for chunk_id, growing the tree as needed.
 * Step 1: add top tnodes until the tree is tall enough.
 * Step 2: walk down, allocating missing internal tnodes; at level 1,
 * plug in passed_tn (freeing any existing level-0 child) or allocate a
 * fresh level-0 tnode.  If the tree top itself is level 0 and passed_tn
 * is given, its packed contents are copied in and passed_tn is freed.
 */
924 struct yaffs_tnode *yaffs_add_find_tnode_0(struct yaffs_dev *dev,
925 struct yaffs_file_var *file_struct,
927 struct yaffs_tnode *passed_tn)
932 struct yaffs_tnode *tn;
935 /* Check sane level and page Id */
936 if (file_struct->top_level < 0 ||
937 file_struct->top_level > YAFFS_TNODES_MAX_LEVEL)
940 if (chunk_id > YAFFS_MAX_CHUNK_ID)
943 /* First check we're tall enough (ie enough top_level) */
945 x = chunk_id >> YAFFS_TNODES_LEVEL0_BITS;
948 x >>= YAFFS_TNODES_INTERNAL_BITS;
952 if (required_depth > file_struct->top_level) {
953 /* Not tall enough, gotta make the tree taller */
954 for (i = file_struct->top_level; i < required_depth; i++) {
956 tn = yaffs_get_tnode(dev);
/* New top's first child is the old top */
959 tn->internal[0] = file_struct->top;
960 file_struct->top = tn;
961 file_struct->top_level++;
963 yaffs_trace(YAFFS_TRACE_ERROR,
964 "yaffs: no more tnodes");
970 /* Traverse down to level 0, adding anything we need */
972 l = file_struct->top_level;
973 tn = file_struct->top;
976 while (l > 0 && tn) {
978 (YAFFS_TNODES_LEVEL0_BITS +
979 (l - 1) * YAFFS_TNODES_INTERNAL_BITS)) &
980 YAFFS_TNODES_INTERNAL_MASK;
982 if ((l > 1) && !tn->internal[x]) {
983 /* Add missing non-level-zero tnode */
984 tn->internal[x] = yaffs_get_tnode(dev);
985 if (!tn->internal[x])
988 /* Looking from level 1 at level 0 */
990 /* If we already have one, release it */
992 yaffs_free_tnode(dev,
994 tn->internal[x] = passed_tn;
996 } else if (!tn->internal[x]) {
997 /* Don't have one, none passed in */
998 tn->internal[x] = yaffs_get_tnode(dev);
999 if (!tn->internal[x])
1004 tn = tn->internal[x];
1008 /* top is level 0 */
/* Copy packed level-0 slots from passed_tn into the existing top */
1010 memcpy(tn, passed_tn,
1011 (dev->tnode_width * YAFFS_NTNODES_LEVEL0) / 8);
1012 yaffs_free_tnode(dev, passed_tn);
/* Return 1 if tags belong to the given object/chunk and the chunk is
 * not marked deleted, else 0.
 */
1019 static int yaffs_tags_match(const struct yaffs_ext_tags *tags, int obj_id,
1022 return (tags->chunk_id == (u32)chunk_obj &&
1023 tags->obj_id == (u32)obj_id &&
1024 !tags->is_deleted) ? 1 : 0;
/* Scan a chunk group starting at the_chunk looking for the chunk whose
 * tags match (obj_id, inode_chunk).  With a group size of 1 no tag read
 * is needed; otherwise tags are read per candidate chunk.  Only chunks
 * whose in-use bit is set are considered.  NOTE(review): the increment
 * of the_chunk and the return values are not visible in this fragment.
 */
1028 static int yaffs_find_chunk_in_group(struct yaffs_dev *dev, int the_chunk,
1029 struct yaffs_ext_tags *tags, int obj_id,
1034 for (j = 0; the_chunk && j < dev->chunk_grp_size; j++) {
1035 if (yaffs_check_chunk_bit
1036 (dev, the_chunk / dev->param.chunks_per_block,
1037 the_chunk % dev->param.chunks_per_block)) {
1039 if (dev->chunk_grp_size == 1)
1042 yaffs_rd_chunk_tags_nand(dev, the_chunk, NULL,
1044 if (yaffs_tags_match(tags,
1045 obj_id, inode_chunk)) {
/* Locate the NAND chunk holding inode_chunk of a file: look up the
 * level-0 tnode, take its group base, then resolve within the group.
 * If tags is NULL a local tags struct is used.
 */
1056 int yaffs_find_chunk_in_file(struct yaffs_obj *in, int inode_chunk,
1057 struct yaffs_ext_tags *tags)
1059 /*Get the Tnode, then get the level 0 offset chunk offset */
1060 struct yaffs_tnode *tn;
1062 struct yaffs_ext_tags local_tags;
1064 struct yaffs_dev *dev = in->my_dev;
1067 /* Passed a NULL, so use our own tags space */
1071 tn = yaffs_find_tnode_0(dev, &in->variant.file_variant, inode_chunk);
1076 the_chunk = yaffs_get_group_base(dev, tn, inode_chunk);
1078 ret_val = yaffs_find_chunk_in_group(dev, the_chunk, tags, in->obj_id,
/* Like yaffs_find_chunk_in_file, but additionally zeroes the level-0
 * tnode entry when found, unhooking the chunk from the file structure.
 */
1083 static int yaffs_find_del_file_chunk(struct yaffs_obj *in, int inode_chunk,
1084 struct yaffs_ext_tags *tags)
1086 /* Get the Tnode, then get the level 0 offset chunk offset */
1087 struct yaffs_tnode *tn;
1089 struct yaffs_ext_tags local_tags;
1090 struct yaffs_dev *dev = in->my_dev;
1094 /* Passed a NULL, so use our own tags space */
1098 tn = yaffs_find_tnode_0(dev, &in->variant.file_variant, inode_chunk);
1103 the_chunk = yaffs_get_group_base(dev, tn, inode_chunk);
1105 ret_val = yaffs_find_chunk_in_group(dev, the_chunk, tags, in->obj_id,
1108 /* Delete the entry in the filestructure (if found) */
1110 yaffs_load_tnode_0(dev, tn, inode_chunk, 0);
/* Record that inode_chunk of file in lives at nand_chunk.  During
 * scanning (in_scan != 0) a pre-existing entry means a duplicate from a
 * power-loss interrupted write: forward scanning (YAFFS1) compares
 * serial numbers mod 4 to pick the newer copy, backward scanning
 * (YAFFS2) keeps the existing one and deletes the new chunk.  The
 * winner is written into the level-0 tnode slot.
 */
1115 int yaffs_put_chunk_in_file(struct yaffs_obj *in, int inode_chunk,
1116 int nand_chunk, int in_scan)
1118 /* NB in_scan is zero unless scanning.
1119 * For forward scanning, in_scan is > 0;
1120 * for backward scanning in_scan is < 0
1122 * nand_chunk = 0 is a dummy insert to make sure the tnodes are there.
1125 struct yaffs_tnode *tn;
1126 struct yaffs_dev *dev = in->my_dev;
1128 struct yaffs_ext_tags existing_tags;
1129 struct yaffs_ext_tags new_tags;
1130 unsigned existing_serial, new_serial;
1132 if (in->variant_type != YAFFS_OBJECT_TYPE_FILE) {
1133 /* Just ignore an attempt at putting a chunk into a non-file
1135 * If it is not during Scanning then something went wrong!
1138 yaffs_trace(YAFFS_TRACE_ERROR,
1139 "yaffs tragedy:attempt to put data chunk into a non-file"
1144 yaffs_chunk_del(dev, nand_chunk, 1, __LINE__);
1148 tn = yaffs_add_find_tnode_0(dev,
1149 &in->variant.file_variant,
1155 /* Dummy insert, bail now */
1158 existing_cunk = yaffs_get_group_base(dev, tn, inode_chunk);
1161 /* If we're scanning then we need to test for duplicates
1162 * NB This does not need to be efficient since it should only
1163 * happen when the power fails during a write, then only one
1164 * chunk should ever be affected.
1166 * Correction for YAFFS2: This could happen quite a lot and we
1167 * need to think about efficiency! TODO
1168 * Update: For backward scanning we don't need to re-read tags
1169 * so this is quite cheap.
1172 if (existing_cunk > 0) {
1173 /* NB Right now existing chunk will not be real
1174 * chunk_id if the chunk group size > 1
1175 * thus we have to do a FindChunkInFile to get the
1178 * We have a duplicate now we need to decide which
1181 * Backwards scanning YAFFS2: The old one is what
1182 * we use, dump the new one.
1183 * YAFFS1: Get both sets of tags and compare serial
1188 /* Only do this for forward scanning */
1189 yaffs_rd_chunk_tags_nand(dev,
1193 /* Do a proper find */
1195 yaffs_find_chunk_in_file(in, inode_chunk,
1199 if (existing_cunk <= 0) {
1200 /*Hoosterman - how did this happen? */
1202 yaffs_trace(YAFFS_TRACE_ERROR,
1203 "yaffs tragedy: existing chunk < 0 in scan"
1208 /* NB The deleted flags should be false, otherwise
1209 * the chunks will not be loaded during a scan
1213 new_serial = new_tags.serial_number;
1214 existing_serial = existing_tags.serial_number;
/* Serial numbers are 2-bit: newer copy is (existing + 1) mod 4 */
1217 if ((in_scan > 0) &&
1218 (existing_cunk <= 0 ||
1219 ((existing_serial + 1) & 3) == new_serial)) {
1220 /* Forward scanning.
1222 * Delete the old one and drop through to
1225 yaffs_chunk_del(dev, existing_cunk, 1,
1228 /* Backward scanning or we want to use the
1230 * Delete the new one and return early so that
1231 * the tnode isn't changed
1233 yaffs_chunk_del(dev, nand_chunk, 1, __LINE__);
1240 if (existing_cunk == 0)
1241 in->n_data_chunks++;
1243 yaffs_load_tnode_0(dev, tn, inode_chunk, nand_chunk);
/* Soft-delete a chunk: bump the owning block's soft-deleted page count
 * and the device free-chunk count, and update the oldest-dirty sequence
 * tracking.  The chunk itself is not rewritten on NAND.
 */
1248 static void yaffs_soft_del_chunk(struct yaffs_dev *dev, int chunk)
1250 struct yaffs_block_info *the_block;
1253 yaffs_trace(YAFFS_TRACE_DELETION, "soft delete chunk %d", chunk);
1255 block_no = chunk / dev->param.chunks_per_block;
1256 the_block = yaffs_get_block_info(dev, block_no);
1258 the_block->soft_del_pages++;
1259 dev->n_free_chunks++;
1260 yaffs2_update_oldest_dirty_seq(dev, block_no, the_block);
1264 /* SoftDeleteWorker scans backwards through the tnode tree and soft deletes all
1265 * the chunks in the file.
1266 * All soft deleting does is increment the block's softdelete count and pulls
1267 * the chunk out of the tnode.
1268 * Thus, essentially this is the same as DeleteWorker except that the chunks
/* Recursively soft-delete every chunk referenced by a tnode subtree,
 * scanning backwards.  Internal levels recurse into children and free
 * each fully-processed child tnode; level 0 soft-deletes each group
 * base and zeroes the slot.  Returns 1 when the subtree is fully done.
 */
1272 static int yaffs_soft_del_worker(struct yaffs_obj *in, struct yaffs_tnode *tn,
1273 u32 level, int chunk_offset)
1278 struct yaffs_dev *dev = in->my_dev;
1284 for (i = YAFFS_NTNODES_INTERNAL - 1;
1287 if (tn->internal[i]) {
1289 yaffs_soft_del_worker(in,
1293 YAFFS_TNODES_INTERNAL_BITS)
1296 yaffs_free_tnode(dev,
1298 tn->internal[i] = NULL;
1300 /* Can this happen? */
1304 return (all_done) ? 1 : 0;
1308 for (i = YAFFS_NTNODES_LEVEL0 - 1; i >= 0; i--) {
1309 the_chunk = yaffs_get_group_base(dev, tn, i);
1311 yaffs_soft_del_chunk(dev, the_chunk);
1312 yaffs_load_tnode_0(dev, tn, i, 0);
/* Unlink an object from its parent directory's sibling list, notifying
 * the OS glue via remove_obj_fn if registered.  Directory invariants
 * are verified before and after.
 */
1318 static void yaffs_remove_obj_from_dir(struct yaffs_obj *obj)
1320 struct yaffs_dev *dev = obj->my_dev;
1321 struct yaffs_obj *parent;
1323 yaffs_verify_obj_in_dir(obj);
1324 parent = obj->parent;
1326 yaffs_verify_dir(parent);
1328 if (dev && dev->param.remove_obj_fn)
1329 dev->param.remove_obj_fn(obj);
1331 list_del_init(&obj->siblings);
1334 yaffs_verify_dir(parent);
/* Add obj to a directory: remove it from its current parent first, link
 * it into the new directory's children list and set parent.  Objects
 * moved into the unlinked or deleted pseudo-directories are counted and
 * lose rename permission.  Bails (with a trace) on a NULL or
 * non-directory target.
 */
1337 void yaffs_add_obj_to_dir(struct yaffs_obj *directory, struct yaffs_obj *obj)
1340 yaffs_trace(YAFFS_TRACE_ALWAYS,
1341 "tragedy: Trying to add an object to a null pointer directory"
1346 if (directory->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
1347 yaffs_trace(YAFFS_TRACE_ALWAYS,
1348 "tragedy: Trying to add an object to a non-directory"
1353 if (obj->siblings.prev == NULL) {
1354 /* Not initialised */
1358 yaffs_verify_dir(directory);
1360 yaffs_remove_obj_from_dir(obj);
1363 list_add(&obj->siblings, &directory->variant.dir_variant.children);
1364 obj->parent = directory;
1366 if (directory == obj->my_dev->unlinked_dir
1367 || directory == obj->my_dev->del_dir) {
1369 obj->my_dev->n_unlinked_files++;
1370 obj->rename_allowed = 0;
1373 yaffs_verify_dir(directory);
1374 yaffs_verify_obj_in_dir(obj);
/* Rename/move an object to new_dir under new_name.  Duplicate names are
 * permitted in the unlinked/deleted pseudo-directories (or when forced
 * or shadowing); otherwise the operation is refused if the target name
 * already exists.  On success the in-memory name and directory are
 * updated and the object header rewritten (marked as a shrink when this
 * is a deletion).  NOTE(review): return statements are not visible in
 * this fragment.
 */
1377 static int yaffs_change_obj_name(struct yaffs_obj *obj,
1378 struct yaffs_obj *new_dir,
1379 const YCHAR *new_name, int force, int shadows)
1383 struct yaffs_obj *existing_target;
1385 if (new_dir == NULL)
1386 new_dir = obj->parent; /* use the old directory */
1388 if (new_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
1389 yaffs_trace(YAFFS_TRACE_ALWAYS,
1390 "tragedy: yaffs_change_obj_name: new_dir is not a directory"
1395 unlink_op = (new_dir == obj->my_dev->unlinked_dir);
1396 del_op = (new_dir == obj->my_dev->del_dir);
1398 existing_target = yaffs_find_by_name(new_dir, new_name);
1400 /* If the object is a file going into the unlinked directory,
1401 * then it is OK to just stuff it in since duplicate names are OK.
1402 * else only proceed if the new name does not exist and we're putting
1403 * it into a directory.
1405 if (!(unlink_op || del_op || force ||
1406 shadows > 0 || !existing_target) ||
1407 new_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY)
1410 yaffs_set_obj_name(obj, new_name);
1412 yaffs_add_obj_to_dir(new_dir, obj);
1417 /* If it is a deletion then we mark it as a shrink for gc */
1418 if (yaffs_update_oh(obj, new_name, 0, del_op, shadows, NULL) >= 0)
/* Remove an object from its hash bucket list and decrement the bucket's
 * population count, if it is still linked in.
 */
1425 static void yaffs_unhash_obj(struct yaffs_obj *obj)
1428 struct yaffs_dev *dev = obj->my_dev;
1430 /* If it is still linked into the bucket list, free from the list */
1431 if (!list_empty(&obj->hash_link)) {
1432 list_del_init(&obj->hash_link);
1433 bucket = yaffs_hash_fn(obj->obj_id);
1434 dev->obj_bucket[bucket].count--;
1438 /* FreeObject frees up a Object and puts it back on the free list */
/* Free an object back to the raw allocator.  If the object is still
 * attached to a cached OS inode, only mark it for deferred freeing (the
 * real free happens in yaffs_handle_defered_free).  Unhashes first and
 * invalidates the cached checkpoint size estimate.
 */
1439 static void yaffs_free_obj(struct yaffs_obj *obj)
1441 struct yaffs_dev *dev;
1448 yaffs_trace(YAFFS_TRACE_OS, "FreeObject %p inode %p",
1449 obj, obj->my_inode);
1452 if (!list_empty(&obj->siblings))
1455 if (obj->my_inode) {
1456 /* We're still hooked up to a cached inode.
1457 * Don't delete now, but mark for later deletion
1459 obj->defered_free = 1;
1463 yaffs_unhash_obj(obj);
1465 yaffs_free_raw_obj(dev, obj);
1467 dev->checkpoint_blocks_required = 0; /* force recalculation */
/* Complete a deferred free (set up by yaffs_free_obj when the object
 * was still attached to a cached inode).
 */
1470 void yaffs_handle_defered_free(struct yaffs_obj *obj)
1472 if (obj->defered_free)
1473 yaffs_free_obj(obj);
/* Generic object deletion: drop cached file data, move YAFFS2 objects
 * into the deleted directory so a deletion record exists, unlink from
 * the parent and delete the object header chunk.
 */
1476 static int yaffs_generic_obj_del(struct yaffs_obj *in)
1478 /* Iinvalidate the file's data in the cache, without flushing. */
1479 yaffs_invalidate_file_cache(in);
1481 if (in->my_dev->param.is_yaffs2 && in->parent != in->my_dev->del_dir) {
1482 /* Move to unlinked directory so we have a deletion record */
1483 yaffs_change_obj_name(in, in->my_dev->del_dir, _Y("deleted"), 0,
1487 yaffs_remove_obj_from_dir(in);
1488 yaffs_chunk_del(in->my_dev, in->hdr_chunk, 1, __LINE__);
/* Soft-delete a file's data chunks.  Only applies to deleted FILE
 * objects.  Empty files are fully deleted immediately; otherwise the
 * tnode tree is walked and every chunk soft-deleted.
 */
1496 static void yaffs_soft_del_file(struct yaffs_obj *obj)
1498 if (!obj->deleted ||
1499 obj->variant_type != YAFFS_OBJECT_TYPE_FILE ||
1503 if (obj->n_data_chunks <= 0) {
1504 /* Empty file with no duplicate object headers,
1505 * just delete it immediately */
1506 yaffs_free_tnode(obj->my_dev, obj->variant.file_variant.top);
1507 obj->variant.file_variant.top = NULL;
1508 yaffs_trace(YAFFS_TRACE_TRACING,
1509 "yaffs: Deleting empty file %d",
1511 yaffs_generic_obj_del(obj);
1513 yaffs_soft_del_worker(obj,
1514 obj->variant.file_variant.top,
1516 file_variant.top_level, 0);
1521 /* Pruning removes any part of the file structure tree that is beyond the
1522 * bounds of the file (ie that does not point to chunks).
1524 * A file should only get pruned when its size is reduced.
1526 * Before pruning, the chunks must be pulled from the tree and the
1527 * level 0 tnode entries must be zeroed out.
1528 * Could also use this for file deletion, but that's probably better handled
1529 * by a special case.
1531 * This function is recursive. For levels > 0 the function is called again on
1532 * any sub-tree. For level == 0 we just check if the sub-tree has data.
1533 * If there is no data in a subtree then it is pruned.
/* Recursively prune empty tnode subtrees.  Internal levels recurse into
 * children (child 0's deletability governed by del0); level 0 scans the
 * packed words for any data.  A tnode with no data (and del0 permitting)
 * is freed and NULL returned so the parent slot can be cleared.
 */
1536 static struct yaffs_tnode *yaffs_prune_worker(struct yaffs_dev *dev,
1537 struct yaffs_tnode *tn, u32 level,
1549 for (i = 0; i < YAFFS_NTNODES_INTERNAL; i++) {
1550 if (tn->internal[i]) {
1552 yaffs_prune_worker(dev,
1555 (i == 0) ? del0 : 1);
1558 if (tn->internal[i])
1562 int tnode_size_u32 = dev->tnode_size / sizeof(u32);
1563 u32 *map = (u32 *) tn;
1565 for (i = 0; !has_data && i < tnode_size_u32; i++) {
1571 if (has_data == 0 && del0) {
1572 /* Free and return NULL */
1573 yaffs_free_tnode(dev, tn);
/* Prune a file's tnode tree after a size reduction: first run the
 * recursive worker to drop empty subtrees, then repeatedly collapse the
 * top of the tree while only internal[0] is populated, reducing
 * top_level each time.  Returns a status code (return on elided line).
 */
1579 static int yaffs_prune_tree(struct yaffs_dev *dev,
1580 struct yaffs_file_var *file_struct)
1585 struct yaffs_tnode *tn;
/* Nothing to trim for a single-level (or empty) tree. */
1587 if (file_struct->top_level < 1)
1591 yaffs_prune_worker(dev, file_struct->top, file_struct->top_level, 0);
1593 /* Now we have a tree with all the non-zero branches NULL but
1594 * the height is the same as it was.
1595 * Let's see if we can trim internal tnodes to shorten the tree.
1596 * We can do this if only the 0th element in the tnode is in use
1597 * (ie all the non-zero are NULL)
1600 while (file_struct->top_level && !done) {
1601 tn = file_struct->top;
/* Any populated slot other than [0] means this level must stay. */
1604 for (i = 1; i < YAFFS_NTNODES_INTERNAL; i++) {
1605 if (tn->internal[i])
1610 file_struct->top = tn->internal[0];
1611 file_struct->top_level--;
1612 yaffs_free_tnode(dev, tn);
1621 /*-------------------- End of File Structure functions.-------------------*/
1623 /* alloc_empty_obj gets us a clean Object.*/
/* Allocate and zero-initialise a yaffs_obj from the raw-object allocator.
 * The new object starts as UNKNOWN type with empty link lists, is parented
 * under the root directory when one exists (otherwise added to
 * lost-n-found), and invalidates the cached checkpoint-size estimate.
 * Returns the object (NULL-check and return are on elided lines).
 */
1624 static struct yaffs_obj *yaffs_alloc_empty_obj(struct yaffs_dev *dev)
1626 struct yaffs_obj *obj = yaffs_alloc_raw_obj(dev);
1633 /* Now sweeten it up... */
1635 memset(obj, 0, sizeof(struct yaffs_obj));
1636 obj->being_created = 1;
1640 obj->variant_type = YAFFS_OBJECT_TYPE_UNKNOWN;
1641 INIT_LIST_HEAD(&(obj->hard_links));
1642 INIT_LIST_HEAD(&(obj->hash_link));
1643 INIT_LIST_HEAD(&obj->siblings);
1645 /* Now make the directory sane */
1646 if (dev->root_dir) {
1647 obj->parent = dev->root_dir;
1648 list_add(&(obj->siblings),
1649 &dev->root_dir->variant.dir_variant.children);
1652 /* Add it to the lost and found directory.
1653 * NB Can't put root or lost-n-found in lost-n-found so
1654 * check if lost-n-found exists first
1656 if (dev->lost_n_found)
1657 yaffs_add_obj_to_dir(dev->lost_n_found, obj);
1659 obj->being_created = 0;
/* Object count changed, so the checkpoint size estimate is stale. */
1661 dev->checkpoint_blocks_required = 0; /* force recalculation */
/* Pick a lightly-loaded object-hash bucket.
 * Probes at most 10 buckets round-robin (via dev->bucket_finder) and stops
 * early once a bucket with fewer than ~4 entries is seen, tracking the
 * smallest count found.  Returns the chosen bucket index l (return on an
 * elided line).  Not guaranteed optimal -- just "nice enough".
 */
1666 static int yaffs_find_nice_bucket(struct yaffs_dev *dev)
1670 int lowest = 999999;
1672 /* Search for the shortest list or one that
1676 for (i = 0; i < 10 && lowest > 4; i++) {
1677 dev->bucket_finder++;
1678 dev->bucket_finder %= YAFFS_NOBJECT_BUCKETS;
1679 if (dev->obj_bucket[dev->bucket_finder].count < lowest) {
1680 lowest = dev->obj_bucket[dev->bucket_finder].count;
1681 l = dev->bucket_finder;
/* Generate a fresh object id.
 * Picks a nice bucket, then scans candidate ids congruent to that bucket
 * (stepping by YAFFS_NOBJECT_BUCKETS) until one is found that no object
 * already hashed into the bucket uses.  Loop control and the return of n
 * are on elided lines.
 */
1688 static int yaffs_new_obj_id(struct yaffs_dev *dev)
1690 int bucket = yaffs_find_nice_bucket(dev);
1692 struct list_head *i;
1693 u32 n = (u32) bucket;
1696 * Now find an object value that has not already been taken
1697 * by scanning the list, incrementing each time by number of buckets.
1701 n += YAFFS_NOBJECT_BUCKETS;
1702 list_for_each(i, &dev->obj_bucket[bucket].list) {
1703 /* Check if this value is already taken. */
1704 if (i && list_entry(i, struct yaffs_obj,
1705 hash_link)->obj_id == n)
/* Insert an object into its hash bucket (keyed by obj_id) and bump the
 * bucket's entry count. */
1712 static void yaffs_hash_obj(struct yaffs_obj *in)
1714 int bucket = yaffs_hash_fn(in->obj_id);
1715 struct yaffs_dev *dev = in->my_dev;
1717 list_add(&in->hash_link, &dev->obj_bucket[bucket].list);
1718 dev->obj_bucket[bucket].count++;
/* Look up an object by id in its hash bucket.
 * Objects that are pending deferred free are hidden from lookups
 * (the return statements are on elided lines).
 */
1721 struct yaffs_obj *yaffs_find_by_number(struct yaffs_dev *dev, u32 number)
1723 int bucket = yaffs_hash_fn(number);
1724 struct list_head *i;
1725 struct yaffs_obj *in;
1727 list_for_each(i, &dev->obj_bucket[bucket].list) {
1728 /* Look if it is in the list */
1729 in = list_entry(i, struct yaffs_obj, hash_link);
1730 if (in->obj_id == number) {
1731 /* Don't show if it is deferred free */
1732 if (in->defered_free)
/* Create a new object of the given type.
 * A negative number requests a freshly-generated object id.  For files a
 * tnode is pre-allocated (and released again if the object allocation
 * fails).  The object is hashed, timestamped and its variant initialised
 * per type.  Returns the object (return on an elided line).
 */
1741 static struct yaffs_obj *yaffs_new_obj(struct yaffs_dev *dev, int number,
1742 enum yaffs_obj_type type)
1744 struct yaffs_obj *the_obj = NULL;
1745 struct yaffs_tnode *tn = NULL;
1748 number = yaffs_new_obj_id(dev);
/* Files need a top-level tnode; grab it up front so failure is easy
 * to unwind. */
1750 if (type == YAFFS_OBJECT_TYPE_FILE) {
1751 tn = yaffs_get_tnode(dev);
1756 the_obj = yaffs_alloc_empty_obj(dev);
1759 yaffs_free_tnode(dev, tn);
1764 the_obj->rename_allowed = 1;
1765 the_obj->unlink_allowed = 1;
1766 the_obj->obj_id = number;
1767 yaffs_hash_obj(the_obj);
1768 the_obj->variant_type = type;
1769 yaffs_load_current_time(the_obj, 1, 1);
/* Per-type variant initialisation. */
1772 case YAFFS_OBJECT_TYPE_FILE:
1773 the_obj->variant.file_variant.file_size = 0;
1774 the_obj->variant.file_variant.stored_size = 0;
1775 the_obj->variant.file_variant.shrink_size =
1776 yaffs_max_file_size(dev);
1777 the_obj->variant.file_variant.top_level = 0;
1778 the_obj->variant.file_variant.top = tn;
1780 case YAFFS_OBJECT_TYPE_DIRECTORY:
1781 INIT_LIST_HEAD(&the_obj->variant.dir_variant.children);
1782 INIT_LIST_HEAD(&the_obj->variant.dir_variant.dirty);
1784 case YAFFS_OBJECT_TYPE_SYMLINK:
1785 case YAFFS_OBJECT_TYPE_HARDLINK:
1786 case YAFFS_OBJECT_TYPE_SPECIAL:
1787 /* No action required */
1789 case YAFFS_OBJECT_TYPE_UNKNOWN:
1790 /* todo this should not happen */
/* Create an in-memory-only ("fake") directory such as root or
 * lost-n-found.  Fake directories are not renameable/unlinkable and have
 * no valid header chunk on NAND.
 */
1796 static struct yaffs_obj *yaffs_create_fake_dir(struct yaffs_dev *dev,
1797 int number, u32 mode)
1800 struct yaffs_obj *obj =
1801 yaffs_new_obj(dev, number, YAFFS_OBJECT_TYPE_DIRECTORY);
1806 obj->fake = 1; /* it is fake so it might not use NAND */
1807 obj->rename_allowed = 0;
1808 obj->unlink_allowed = 0;
1811 obj->yst_mode = mode;
1813 obj->hdr_chunk = 0; /* Not a valid chunk. */
/* Initialise the raw tnode/object allocator and reset every object hash
 * bucket to an empty list with a zero count. */
1819 static void yaffs_init_tnodes_and_objs(struct yaffs_dev *dev)
1825 yaffs_init_raw_tnodes_and_objs(dev);
1827 for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) {
1828 INIT_LIST_HEAD(&dev->obj_bucket[i].list);
1829 dev->obj_bucket[i].count = 0;
/* Return the object with the given id, creating one of the given type if
 * no such object exists yet.  Used during scanning when objects may be
 * encountered out of order.
 */
1833 struct yaffs_obj *yaffs_find_or_create_by_number(struct yaffs_dev *dev,
1835 enum yaffs_obj_type type)
1837 struct yaffs_obj *the_obj = NULL;
1840 the_obj = yaffs_find_by_number(dev, number);
1843 the_obj = yaffs_new_obj(dev, number, type);
/* Duplicate up to YAFFS_MAX_ALIAS_LENGTH characters of str into a fresh
 * kmalloc'd buffer of len + 1 YCHARs.
 * NOTE(review): strncpy alone does not guarantee NUL termination; the
 * terminator is presumably written on an elided line -- verify in the
 * full source.  Caller owns and must free the returned string.
 */
1849 YCHAR *yaffs_clone_str(const YCHAR *str)
1851 YCHAR *new_str = NULL;
1857 len = strnlen(str, YAFFS_MAX_ALIAS_LENGTH);
1858 new_str = kmalloc((len + 1) * sizeof(YCHAR), GFP_NOFS);
1860 strncpy(new_str, str, len);
1867 *yaffs_update_parent() handles fixing a directory's mtime and ctime when a new
1868 * link (ie. name) is created or deleted in the directory.
1871 * create dir/a : update dir's mtime/ctime
1872 * rm dir/a: update dir's mtime/ctime
1873 * modify dir/a: don't update dir's mtime/ctime
1875 * This can be handled immediately or deferred. Deferring helps reduce the number
1876 * of updates when many files in a directory are changed within a brief period.
1878 * If the directory updating is deferred then yaffs_update_dirty_dirs must be
1879 * called periodically.
/* Refresh a directory's mtime/ctime after a link change (see block
 * comment above).  With defered_dir_update enabled the directory is
 * queued on dev->dirty_dirs for later flushing by
 * yaffs_update_dirty_dirs(); otherwise its object header is rewritten
 * immediately.
 */
1882 static void yaffs_update_parent(struct yaffs_obj *obj)
1884 struct yaffs_dev *dev;
1890 yaffs_load_current_time(obj, 0, 1);
1891 if (dev->param.defered_dir_update) {
1892 struct list_head *link = &obj->variant.dir_variant.dirty;
/* Only queue once: an empty link means it is not yet on the list. */
1894 if (list_empty(link)) {
1895 list_add(link, &dev->dirty_dirs);
1896 yaffs_trace(YAFFS_TRACE_BACKGROUND,
1897 "Added object %d to dirty directories",
1902 yaffs_update_oh(obj, NULL, 0, 0, 0, NULL);
/* Flush all deferred directory-timestamp updates: pop each entry off
 * dev->dirty_dirs, recover the owning yaffs_obj from the embedded
 * dir_variant list head (via nested list_entry container arithmetic),
 * and rewrite its object header.
 */
1906 void yaffs_update_dirty_dirs(struct yaffs_dev *dev)
1908 struct list_head *link;
1909 struct yaffs_obj *obj;
1910 struct yaffs_dir_var *d_s;
1911 union yaffs_obj_var *o_v;
1913 yaffs_trace(YAFFS_TRACE_BACKGROUND, "Update dirty directories");
1915 while (!list_empty(&dev->dirty_dirs)) {
1916 link = dev->dirty_dirs.next;
1917 list_del_init(link);
/* link -> dir_variant -> variant union -> enclosing object. */
1919 d_s = list_entry(link, struct yaffs_dir_var, dirty);
1920 o_v = list_entry(d_s, union yaffs_obj_var, dir_variant);
1921 obj = list_entry(o_v, struct yaffs_obj, variant);
1923 yaffs_trace(YAFFS_TRACE_BACKGROUND, "Update directory %d",
1927 yaffs_update_oh(obj, NULL, 0, 0, 0, NULL);
1932 * Mknod (create) a new object.
1933 * equiv_obj only has meaning for a hard link;
1934 * alias_str only has meaning for a symlink.
1935 * rdev only has meaning for devices (a subset of special objects)
/* Core mknod: create an object of any type under parent (see comment
 * above for which parameters apply to which types).  Fails if the name
 * already exists in parent.  For symlinks the alias is cloned first; the
 * new object is named, added to the parent directory, variant-initialised,
 * and its object header is written to NAND.  On header-write failure the
 * object is torn down (cleanup on elided lines).  Finally the parent's
 * timestamps are updated.
 */
1938 static struct yaffs_obj *yaffs_create_obj(enum yaffs_obj_type type,
1939 struct yaffs_obj *parent,
1944 struct yaffs_obj *equiv_obj,
1945 const YCHAR *alias_str, u32 rdev)
1947 struct yaffs_obj *in;
1949 struct yaffs_dev *dev = parent->my_dev;
1951 /* Check if the entry exists.
1952 * If it does then fail the call since we don't want a dup. */
1953 if (yaffs_find_by_name(parent, name))
/* Clone the alias before creating the object so failure is cheap. */
1956 if (type == YAFFS_OBJECT_TYPE_SYMLINK) {
1957 str = yaffs_clone_str(alias_str);
1962 in = yaffs_new_obj(dev, -1, type);
1971 in->variant_type = type;
1973 in->yst_mode = mode;
1975 yaffs_attribs_init(in, gid, uid, rdev);
1977 in->n_data_chunks = 0;
1979 yaffs_set_obj_name(in, name);
1982 yaffs_add_obj_to_dir(parent, in);
1984 in->my_dev = parent->my_dev;
/* Per-type wiring: alias for symlinks, equivalence links for hardlinks. */
1987 case YAFFS_OBJECT_TYPE_SYMLINK:
1988 in->variant.symlink_variant.alias = str;
1990 case YAFFS_OBJECT_TYPE_HARDLINK:
1991 in->variant.hardlink_variant.equiv_obj = equiv_obj;
1992 in->variant.hardlink_variant.equiv_id = equiv_obj->obj_id;
1993 list_add(&in->hard_links, &equiv_obj->hard_links);
1995 case YAFFS_OBJECT_TYPE_FILE:
1996 case YAFFS_OBJECT_TYPE_DIRECTORY:
1997 case YAFFS_OBJECT_TYPE_SPECIAL:
1998 case YAFFS_OBJECT_TYPE_UNKNOWN:
2003 if (yaffs_update_oh(in, name, 0, 0, 0, NULL) < 0) {
2004 /* Could not create the object header, fail */
2010 yaffs_update_parent(parent);
/* Public wrapper: create a regular file via yaffs_create_obj(). */
2015 struct yaffs_obj *yaffs_create_file(struct yaffs_obj *parent,
2016 const YCHAR *name, u32 mode, u32 uid,
2019 return yaffs_create_obj(YAFFS_OBJECT_TYPE_FILE, parent, name, mode,
2020 uid, gid, NULL, NULL, 0);
/* Public wrapper: create a directory via yaffs_create_obj(). */
2023 struct yaffs_obj *yaffs_create_dir(struct yaffs_obj *parent, const YCHAR *name,
2024 u32 mode, u32 uid, u32 gid)
2026 return yaffs_create_obj(YAFFS_OBJECT_TYPE_DIRECTORY, parent, name,
2027 mode, uid, gid, NULL, NULL, 0);
/* Public wrapper: create a special (device) object; rdev carries the
 * device number. */
2030 struct yaffs_obj *yaffs_create_special(struct yaffs_obj *parent,
2031 const YCHAR *name, u32 mode, u32 uid,
2034 return yaffs_create_obj(YAFFS_OBJECT_TYPE_SPECIAL, parent, name, mode,
2035 uid, gid, NULL, NULL, rdev);
/* Public wrapper: create a symlink whose target string is alias. */
2038 struct yaffs_obj *yaffs_create_symlink(struct yaffs_obj *parent,
2039 const YCHAR *name, u32 mode, u32 uid,
2040 u32 gid, const YCHAR *alias)
2042 return yaffs_create_obj(YAFFS_OBJECT_TYPE_SYMLINK, parent, name, mode,
2043 uid, gid, NULL, alias, 0);
2046 /* yaffs_link_obj returns the object id of the equivalent object.*/
/* Create a hard link named name under parent pointing at equiv_obj.
 * Dereferences equiv_obj first in case the caller passed a hard-link
 * object rather than the real target. */
2047 struct yaffs_obj *yaffs_link_obj(struct yaffs_obj *parent, const YCHAR * name,
2048 struct yaffs_obj *equiv_obj)
2050 /* Get the real object in case we were fed a hard link obj */
2051 equiv_obj = yaffs_get_equivalent_obj(equiv_obj);
2053 if (yaffs_create_obj(YAFFS_OBJECT_TYPE_HARDLINK,
2054 parent, name, 0, 0, 0,
2055 equiv_obj, NULL, 0))
2064 /*---------------------- Block Management and Page Allocation -------------*/
/* Free the block-info array and chunk-usage bitmap, using vfree or kfree
 * depending on which allocator succeeded in yaffs_init_blocks() (tracked
 * by the *_alt flags), then reset pointers/flags so a re-init is safe.
 */
2066 static void yaffs_deinit_blocks(struct yaffs_dev *dev)
2068 if (dev->block_info_alt && dev->block_info)
2069 vfree(dev->block_info);
2071 kfree(dev->block_info);
2073 dev->block_info_alt = 0;
2075 dev->block_info = NULL;
2077 if (dev->chunk_bits_alt && dev->chunk_bits)
2078 vfree(dev->chunk_bits);
2080 kfree(dev->chunk_bits);
2081 dev->chunk_bits_alt = 0;
2082 dev->chunk_bits = NULL;
/* Allocate and zero the per-block info array and the chunk-in-use bitmap
 * for all internal blocks.  Each allocation tries kmalloc first and falls
 * back to vmalloc (recorded in the *_alt flags so deinit frees with the
 * matching routine).  On failure everything is torn down via
 * yaffs_deinit_blocks(); success/failure returns are on elided lines.
 */
2085 static int yaffs_init_blocks(struct yaffs_dev *dev)
2087 int n_blocks = dev->internal_end_block - dev->internal_start_block + 1;
2089 dev->block_info = NULL;
2090 dev->chunk_bits = NULL;
2091 dev->alloc_block = -1; /* force it to get a new one */
2093 /* If the first allocation strategy fails, try the alternate one */
2095 kmalloc(n_blocks * sizeof(struct yaffs_block_info), GFP_NOFS);
2096 if (!dev->block_info) {
2098 vmalloc(n_blocks * sizeof(struct yaffs_block_info));
2099 dev->block_info_alt = 1;
2101 dev->block_info_alt = 0;
2104 if (!dev->block_info)
2107 /* Set up dynamic blockinfo stuff. Round up bytes. */
2108 dev->chunk_bit_stride = (dev->param.chunks_per_block + 7) / 8;
2110 kmalloc(dev->chunk_bit_stride * n_blocks, GFP_NOFS);
2111 if (!dev->chunk_bits) {
2113 vmalloc(dev->chunk_bit_stride * n_blocks);
2114 dev->chunk_bits_alt = 1;
2116 dev->chunk_bits_alt = 0;
2118 if (!dev->chunk_bits)
2122 memset(dev->block_info, 0, n_blocks * sizeof(struct yaffs_block_info));
2123 memset(dev->chunk_bits, 0, dev->chunk_bit_stride * n_blocks);
/* Failure path: release anything partially allocated. */
2127 yaffs_deinit_blocks(dev);
/* Handle a block whose last in-use chunk has gone: erase it and return it
 * to the free pool, or retire it if it needs retiring / erasure fails.
 * Also cancels any GC interest in this block and invalidates the
 * checkpoint before erasing.  Some control-flow lines (erase-failure
 * branch joins, retire goto/label) are elided from this extract.
 */
2132 void yaffs_block_became_dirty(struct yaffs_dev *dev, int block_no)
2134 struct yaffs_block_info *bi = yaffs_get_block_info(dev, block_no);
2138 /* If the block is still healthy erase it and mark as clean.
2139 * If the block has had a data failure, then retire it.
2142 yaffs_trace(YAFFS_TRACE_GC | YAFFS_TRACE_ERASE,
2143 "yaffs_block_became_dirty block %d state %d %s",
2144 block_no, bi->block_state,
2145 (bi->needs_retiring) ? "needs retiring" : "");
2147 yaffs2_clear_oldest_dirty_seq(dev, bi);
2149 bi->block_state = YAFFS_BLOCK_STATE_DIRTY;
2151 /* If this is the block being garbage collected then stop gc'ing */
2152 if (block_no == (int)dev->gc_block)
2155 /* If this block is currently the best candidate for gc
2156 * then drop as a candidate */
2157 if (block_no == (int)dev->gc_dirtiest) {
2158 dev->gc_dirtiest = 0;
2159 dev->gc_pages_in_use = 0;
2162 if (!bi->needs_retiring) {
/* Erasing destroys any checkpoint stored on flash. */
2163 yaffs2_checkpt_invalidate(dev);
2164 erased_ok = yaffs_erase_block(dev, block_no);
2166 dev->n_erase_failures++;
2167 yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
2168 "**>> Erasure failed %d", block_no);
2172 /* Verify erasure if needed */
2174 ((yaffs_trace_mask & YAFFS_TRACE_ERASE) ||
2175 !yaffs_skip_verification(dev))) {
2176 for (i = 0; i < dev->param.chunks_per_block; i++) {
2177 if (!yaffs_check_chunk_erased(dev,
2178 block_no * dev->param.chunks_per_block + i)) {
2179 yaffs_trace(YAFFS_TRACE_ERROR,
2180 ">>Block %d erasure supposedly OK, but chunk %d not erased",
/* Retire path: the block is taken out of service permanently. */
2187 /* We lost a block of free space */
2188 dev->n_free_chunks -= dev->param.chunks_per_block;
2189 yaffs_retire_block(dev, block_no);
2190 yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
2191 "**>> Block %d retired", block_no);
2195 /* Clean it up... */
2196 bi->block_state = YAFFS_BLOCK_STATE_EMPTY;
2198 dev->n_erased_blocks++;
2199 bi->pages_in_use = 0;
2200 bi->soft_del_pages = 0;
2201 bi->has_shrink_hdr = 0;
2202 bi->skip_erased_check = 1; /* Clean, so no need to check */
2203 bi->gc_prioritise = 0;
2204 bi->has_summary = 0;
2206 yaffs_clear_chunk_bits(dev, block_no);
2208 yaffs_trace(YAFFS_TRACE_ERASE, "Erased block %d", block_no);
/* Garbage-collect one live chunk from the block being collected.
 * Reads the chunk's tags, looks up the owning object, and either:
 *  - drops the chunk if it belongs to a soft-deleted file (fixing the
 *    free-chunk and per-object accounting, queueing the object for final
 *    cleanup when its last data chunk goes), or
 *  - rewrites the chunk elsewhere (bumping its serial number).  Object
 *    headers get their shrink/shadow state scrubbed and the stored file
 *    size refreshed before rewriting.
 * Returns YAFFS_OK/YAFFS_FAIL; on success the old chunk is deleted.
 * Several branch/brace lines are elided in this extract.
 */
2211 static inline int yaffs_gc_process_chunk(struct yaffs_dev *dev,
2212 struct yaffs_block_info *bi,
2213 int old_chunk, u8 *buffer)
2217 struct yaffs_ext_tags tags;
2218 struct yaffs_obj *object;
2220 int ret_val = YAFFS_OK;
2222 memset(&tags, 0, sizeof(tags));
2223 yaffs_rd_chunk_tags_nand(dev, old_chunk,
2225 object = yaffs_find_by_number(dev, tags.obj_id);
2227 yaffs_trace(YAFFS_TRACE_GC_DETAIL,
2228 "Collecting chunk in block %d, %d %d %d ",
2229 dev->gc_chunk, tags.obj_id,
2230 tags.chunk_id, tags.n_bytes);
/* Optional sanity check: the chunk found via the file's tnode tree
 * should match the chunk we are collecting. */
2232 if (object && !yaffs_skip_verification(dev)) {
2233 if (tags.chunk_id == 0)
2236 else if (object->soft_del)
2237 /* Defeat the test */
2238 matching_chunk = old_chunk;
2241 yaffs_find_chunk_in_file
2242 (object, tags.chunk_id,
2245 if (old_chunk != matching_chunk)
2246 yaffs_trace(YAFFS_TRACE_ERROR,
2247 "gc: page in gc mismatch: %d %d %d %d",
2255 yaffs_trace(YAFFS_TRACE_ERROR,
2256 "page %d in gc has no object: %d %d %d ",
2258 tags.obj_id, tags.chunk_id,
2264 object->soft_del && tags.chunk_id != 0) {
2265 /* Data chunk in a soft deleted file,
2267 * It's a soft deleted data chunk,
2268 * No need to copy this, just forget
2269 * about it and fix up the object.
2272 /* Free chunks already includes
2273 * softdeleted chunks, how ever this
2274 * chunk is going to soon be really
2275 * deleted which will increment free
2276 * chunks. We have to decrement free
2277 * chunks so this works out properly.
2279 dev->n_free_chunks--;
2280 bi->soft_del_pages--;
2282 object->n_data_chunks--;
2283 if (object->n_data_chunks <= 0) {
2284 /* remember to clean up obj */
2285 dev->gc_cleanup_list[dev->n_clean_ups] = tags.obj_id;
2289 } else if (object) {
2290 /* It's either a data chunk in a live
2291 * file or an ObjectHeader, so we're
2293 * NB Need to keep the ObjectHeaders of
2294 * deleted files until the whole file
2295 * has been deleted off
2297 tags.serial_number++;
2300 if (tags.chunk_id == 0) {
2301 /* It is an object Id,
2302 * We need to nuke the shrinkheader flags since its
2304 * Also need to clean up shadowing.
2305 * NB We don't want to do all the work of translating
2306 * object header endianism back and forth so we leave
2307 * the oh endian in its stored order.
2310 struct yaffs_obj_hdr *oh;
2311 oh = (struct yaffs_obj_hdr *) buffer;
2314 tags.extra_is_shrink = 0;
2315 oh->shadows_obj = 0;
2316 oh->inband_shadowed_obj_id = 0;
2317 tags.extra_shadows = 0;
2319 /* Update file size */
2320 if (object->variant_type == YAFFS_OBJECT_TYPE_FILE) {
2321 yaffs_oh_size_load(dev, oh,
2322 object->variant.file_variant.stored_size, 1);
2323 tags.extra_file_size =
2324 object->variant.file_variant.stored_size;
2327 yaffs_verify_oh(object, oh, &tags, 1);
2329 yaffs_write_new_chunk(dev, (u8 *) oh, &tags, 1);
2332 yaffs_write_new_chunk(dev, buffer, &tags, 1);
2335 if (new_chunk < 0) {
2336 ret_val = YAFFS_FAIL;
2339 /* Now fix up the Tnodes etc. */
2341 if (tags.chunk_id == 0) {
/* Header chunk: point the object at its new home. */
2343 object->hdr_chunk = new_chunk;
2344 object->serial = tags.serial_number;
2346 /* It's a data chunk */
2347 yaffs_put_chunk_in_file(object, tags.chunk_id,
2352 if (ret_val == YAFFS_OK)
2353 yaffs_chunk_del(dev, old_chunk, mark_flash, __LINE__);
/* Garbage-collect (part of) one block.
 * With GC recursion disabled, either marks an empty/checkpoint block dirty
 * straight away, or copies up to max_copies live chunks out via
 * yaffs_gc_process_chunk() (whole_block limits partial passes to 5 copies
 * per call; gc state persists in dev->gc_chunk between calls).  When the
 * block finishes collecting, objects queued in gc_cleanup_list are finally
 * deleted and a sanity trace fires if free space did not grow.
 */
2357 static int yaffs_gc_block(struct yaffs_dev *dev, int block, int whole_block)
2360 int ret_val = YAFFS_OK;
2362 int is_checkpt_block;
2364 int chunks_before = yaffs_get_erased_chunks(dev);
2366 struct yaffs_block_info *bi = yaffs_get_block_info(dev, block);
2368 is_checkpt_block = (bi->block_state == YAFFS_BLOCK_STATE_CHECKPOINT);
2370 yaffs_trace(YAFFS_TRACE_TRACING,
2371 "Collecting block %d, in use %d, shrink %d, whole_block %d",
2372 block, bi->pages_in_use, bi->has_shrink_hdr,
2375 /*yaffs_verify_free_chunks(dev); */
2377 if (bi->block_state == YAFFS_BLOCK_STATE_FULL)
2378 bi->block_state = YAFFS_BLOCK_STATE_COLLECTING;
2380 bi->has_shrink_hdr = 0; /* clear the flag so that the block can erase */
/* Prevent recursive GC while we allocate replacement chunks. */
2382 dev->gc_disable = 1;
2384 yaffs_summary_gc(dev, block);
2386 if (is_checkpt_block || !yaffs_still_some_chunks(dev, block)) {
2387 yaffs_trace(YAFFS_TRACE_TRACING,
2388 "Collecting block %d that has no chunks in use",
2390 yaffs_block_became_dirty(dev, block);
2393 u8 *buffer = yaffs_get_temp_buffer(dev);
2395 yaffs_verify_blk(dev, bi, block);
2397 max_copies = (whole_block) ? dev->param.chunks_per_block : 5;
2398 old_chunk = block * dev->param.chunks_per_block + dev->gc_chunk;
2400 for (/* init already done */ ;
2401 ret_val == YAFFS_OK &&
2402 dev->gc_chunk < dev->param.chunks_per_block &&
2403 (bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) &&
2405 dev->gc_chunk++, old_chunk++) {
2406 if (yaffs_check_chunk_bit(dev, block, dev->gc_chunk)) {
2407 /* Page is in use and might need to be copied */
2409 ret_val = yaffs_gc_process_chunk(dev, bi,
2413 yaffs_release_temp_buffer(dev, buffer);
2416 yaffs_verify_collected_blk(dev, bi, block);
2418 if (bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) {
2420 * The gc did not complete. Set block state back to FULL
2421 * because checkpointing does not restore gc.
2423 bi->block_state = YAFFS_BLOCK_STATE_FULL;
2425 /* The gc completed. */
2426 /* Do any required cleanups */
2427 for (i = 0; i < dev->n_clean_ups; i++) {
2428 /* Time to delete the file too */
2429 struct yaffs_obj *object =
2430 yaffs_find_by_number(dev, dev->gc_cleanup_list[i]);
2432 yaffs_free_tnode(dev,
2433 object->variant.file_variant.top);
2434 object->variant.file_variant.top = NULL;
2435 yaffs_trace(YAFFS_TRACE_GC,
2436 "yaffs: About to finally delete object %d",
2438 yaffs_generic_obj_del(object);
2439 object->my_dev->n_deleted_files--;
2443 chunks_after = yaffs_get_erased_chunks(dev);
2444 if (chunks_before >= chunks_after)
2445 yaffs_trace(YAFFS_TRACE_GC,
2446 "gc did not increase free chunks before %d after %d",
2447 chunks_before, chunks_after);
2450 dev->n_clean_ups = 0;
2453 dev->gc_disable = 0;
2459 * find_gc_block() selects the dirtiest block (or close enough)
2460 * for garbage collection.
/* Select a block to garbage-collect (see comment above).
 * Priority order: (1) a prioritised block flagged for urgent gc; (2) the
 * dirtiest block found by a bounded round-robin scan whose pages-in-use is
 * under a threshold derived from aggressive/background mode; (3) after
 * repeated failures, the oldest dirty block (yaffs2) to unstick the
 * sequence-number window.  Returns the selected block number, or 0 and
 * bumps gc_not_done when nothing qualifies.
 */
2463 static unsigned yaffs_find_gc_block(struct yaffs_dev *dev,
2464 int aggressive, int background)
2469 int prioritised = 0;
2470 int prioritised_exist = 0;
2471 struct yaffs_block_info *bi;
2472 u32 threshold = dev->param.chunks_per_block;
2474 /* First let's see if we need to grab a prioritised block */
2475 if (dev->has_pending_prioritised_gc && !aggressive) {
2476 dev->gc_dirtiest = 0;
2477 bi = dev->block_info;
2478 for (i = dev->internal_start_block;
2479 i <= dev->internal_end_block && !selected; i++) {
2481 if (bi->gc_prioritise) {
2482 prioritised_exist = 1;
2483 if (bi->block_state == YAFFS_BLOCK_STATE_FULL &&
2484 yaffs_block_ok_for_gc(dev, bi)) {
2493 * If there is a prioritised block and none was selected then
2494 * this happened because there is at least one old dirty block
2495 * gumming up the works. Let's gc the oldest dirty block.
2498 if (prioritised_exist &&
2499 !selected && dev->oldest_dirty_block > 0)
2500 selected = dev->oldest_dirty_block;
2502 if (!prioritised_exist) /* None found, so we can clear this */
2503 dev->has_pending_prioritised_gc = 0;
2506 /* If we're doing aggressive GC then we are happy to take a less-dirty
2507 * block, and search harder.
2508 * else (leisurely gc), then we only bother to do this if the
2509 * block has only a few pages in use.
2515 dev->internal_end_block - dev->internal_start_block + 1;
/* Aggressive mode: scan everything and accept any non-full block. */
2517 threshold = dev->param.chunks_per_block;
2518 iterations = n_blocks;
/* Passive mode: derive a tighter pages-in-use threshold, scaled up
 * the longer gc has been failing (gc_not_done), and clamp it. */
2523 max_threshold = dev->param.chunks_per_block / 2;
2525 max_threshold = dev->param.chunks_per_block / 8;
2527 if (max_threshold < YAFFS_GC_PASSIVE_THRESHOLD)
2528 max_threshold = YAFFS_GC_PASSIVE_THRESHOLD;
2530 threshold = background ? (dev->gc_not_done + 2) * 2 : 0;
2531 if (threshold < YAFFS_GC_PASSIVE_THRESHOLD)
2532 threshold = YAFFS_GC_PASSIVE_THRESHOLD;
2533 if (threshold > max_threshold)
2534 threshold = max_threshold;
2536 iterations = n_blocks / 16 + 1;
2537 if (iterations > 100)
/* Keep scanning while we have no good-enough candidate. */
2543 (dev->gc_dirtiest < 1 ||
2544 dev->gc_pages_in_use > YAFFS_GC_GOOD_ENOUGH);
2546 dev->gc_block_finder++;
2547 if (dev->gc_block_finder < dev->internal_start_block ||
2548 dev->gc_block_finder > dev->internal_end_block)
2549 dev->gc_block_finder =
2550 dev->internal_start_block;
2552 bi = yaffs_get_block_info(dev, dev->gc_block_finder);
/* Soft-deleted pages are already reclaimable, so don't count them. */
2554 pages_used = bi->pages_in_use - bi->soft_del_pages;
2556 if (bi->block_state == YAFFS_BLOCK_STATE_FULL &&
2557 pages_used < dev->param.chunks_per_block &&
2558 (dev->gc_dirtiest < 1 ||
2559 pages_used < dev->gc_pages_in_use) &&
2560 yaffs_block_ok_for_gc(dev, bi)) {
2561 dev->gc_dirtiest = dev->gc_block_finder;
2562 dev->gc_pages_in_use = pages_used;
2566 if (dev->gc_dirtiest > 0 && dev->gc_pages_in_use <= threshold)
2567 selected = dev->gc_dirtiest;
2571 * If nothing has been selected for a while, try the oldest dirty
2572 * because that's gumming up the works.
2575 if (!selected && dev->param.is_yaffs2 &&
2576 dev->gc_not_done >= (background ? 10 : 20)) {
2577 yaffs2_find_oldest_dirty_seq(dev);
2578 if (dev->oldest_dirty_block > 0) {
2579 selected = dev->oldest_dirty_block;
2580 dev->gc_dirtiest = selected;
2581 dev->oldest_dirty_gc_count++;
2582 bi = yaffs_get_block_info(dev, selected);
2583 dev->gc_pages_in_use =
2584 bi->pages_in_use - bi->soft_del_pages;
2586 dev->gc_not_done = 0;
2591 yaffs_trace(YAFFS_TRACE_GC,
2592 "GC Selected block %d with %d free, prioritised:%d",
2594 dev->param.chunks_per_block - dev->gc_pages_in_use,
/* Selection made: reset candidate tracking and refresh countdown. */
2601 dev->gc_dirtiest = 0;
2602 dev->gc_pages_in_use = 0;
2603 dev->gc_not_done = 0;
2604 if (dev->refresh_skip > 0)
2605 dev->refresh_skip--;
2608 yaffs_trace(YAFFS_TRACE_GC,
2609 "GC none: finder %d skip %d threshold %d dirtiest %d using %d oldest %d%s",
2610 dev->gc_block_finder, dev->gc_not_done, threshold,
2611 dev->gc_dirtiest, dev->gc_pages_in_use,
2612 dev->oldest_dirty_block, background ? " bg" : "");
2618 /* New garbage collector
2619 * If we're very low on erased blocks then we do aggressive garbage collection
2620 * otherwise we do "leisurely" garbage collection.
2621 * Aggressive gc looks further (whole array) and will accept less dirty blocks.
2622 * Passive gc only inspects smaller areas and only accepts more dirty blocks.
2624 * The idea is to help clear out space in a more spread-out manner.
2625 * Dunno if it really does anything useful.
/* Decide whether GC is needed and run it.
 * Bails out early if a user gc_control_fn vetoes gc or gc is already in
 * progress (gc_disable prevents recursion).  Otherwise loops: compute the
 * erased-block floor (reserve + checkpoint need), go aggressive when below
 * it (the skip/passive heuristics on the elided lines moderate how often
 * passive gc runs), find or continue a gc block, and collect it.  Loops
 * while space is still below reserve and progress is possible (max_tries
 * capped).  Returns the gc result in aggressive mode, else YAFFS_OK.
 */
2627 static int yaffs_check_gc(struct yaffs_dev *dev, int background)
2630 int gc_ok = YAFFS_OK;
2634 int checkpt_block_adjust;
2636 if (dev->param.gc_control_fn &&
2637 (dev->param.gc_control_fn(dev) & 1) == 0)
2640 if (dev->gc_disable)
2641 /* Bail out so we don't get recursive gc */
2644 /* This loop should pass the first time.
2645 * Only loops here if the collection does not increase space.
2651 checkpt_block_adjust = yaffs_calc_checkpt_blocks_required(dev);
2654 dev->param.n_reserved_blocks + checkpt_block_adjust + 1;
2656 dev->n_erased_blocks * dev->param.chunks_per_block;
2658 /* If we need a block soon then do aggressive gc. */
2659 if (dev->n_erased_blocks < min_erased)
2663 && erased_chunks > (dev->n_free_chunks / 4))
2666 if (dev->gc_skip > 20)
2668 if (erased_chunks < dev->n_free_chunks / 2 ||
2669 dev->gc_skip < 1 || background)
2679 /* If we don't already have a block being gc'd then see if we
2680 * should start another */
2682 if (dev->gc_block < 1 && !aggressive) {
2683 dev->gc_block = yaffs2_find_refresh_block(dev);
2685 dev->n_clean_ups = 0;
2687 if (dev->gc_block < 1) {
2689 yaffs_find_gc_block(dev, aggressive, background);
2691 dev->n_clean_ups = 0;
2694 if (dev->gc_block > 0) {
2697 dev->passive_gc_count++;
2699 yaffs_trace(YAFFS_TRACE_GC,
2700 "yaffs: GC n_erased_blocks %d aggressive %d",
2701 dev->n_erased_blocks, aggressive);
2703 gc_ok = yaffs_gc_block(dev, dev->gc_block, aggressive);
2706 if (dev->n_erased_blocks < (int)dev->param.n_reserved_blocks &&
2707 dev->gc_block > 0) {
2708 yaffs_trace(YAFFS_TRACE_GC,
2709 "yaffs: GC !!!no reclaim!!! n_erased_blocks %d after try %d block %d",
2710 dev->n_erased_blocks, max_tries,
2713 } while ((dev->n_erased_blocks < (int)dev->param.n_reserved_blocks) &&
2714 (dev->gc_block > 0) && (max_tries < 2));
2716 return aggressive ? gc_ok : YAFFS_OK;
2721 * Garbage collects. Intended to be called from a background thread.
2722 * Returns non-zero if at least half the free chunks are erased.
/* Background-thread entry point: run a gc pass and report whether at
 * least half the free chunks are erased (non-zero = healthy).
 * NOTE(review): erased_chunks is sampled before yaffs_check_gc() runs,
 * so the return reflects the pre-gc state.
 */
2724 int yaffs_bg_gc(struct yaffs_dev *dev, unsigned urgency)
2726 int erased_chunks = dev->n_erased_blocks * dev->param.chunks_per_block;
2728 yaffs_trace(YAFFS_TRACE_BACKGROUND, "Background gc %u", urgency);
2730 yaffs_check_gc(dev, 1);
2731 return erased_chunks > dev->n_free_chunks / 2;
2734 /*-------------------- Data file manipulation -----------------*/
/* Read one data chunk of a file into buffer.
 * If the file has no chunk at inode_chunk (a hole), the buffer is
 * zero-filled instead so callers see sane data.
 */
2736 static int yaffs_rd_data_obj(struct yaffs_obj *in, int inode_chunk, u8 * buffer)
2738 int nand_chunk = yaffs_find_chunk_in_file(in, inode_chunk, NULL);
2740 if (nand_chunk >= 0)
2741 return yaffs_rd_chunk_tags_nand(in->my_dev, nand_chunk,
2744 yaffs_trace(YAFFS_TRACE_NANDACCESS,
2745 "Chunk %d not found zero instead",
2747 /* get sane (zero) data if you read a hole */
2748 memset(buffer, 0, in->my_dev->data_bytes_per_chunk);
/* Delete a chunk from the management structures.
 * For yaffs1 (mark_flash set, and not during collection) a deletion
 * marker is also written to flash; yaffs2 relies on sequence numbers and
 * just counts an unmarked deletion.  Clearing the chunk's in-use bit may
 * leave the block fully empty, which triggers
 * yaffs_block_became_dirty() to erase/reclaim it.
 */
2754 void yaffs_chunk_del(struct yaffs_dev *dev, int chunk_id, int mark_flash,
2759 struct yaffs_ext_tags tags;
2760 struct yaffs_block_info *bi;
2766 block = chunk_id / dev->param.chunks_per_block;
2767 page = chunk_id % dev->param.chunks_per_block;
2769 if (!yaffs_check_chunk_bit(dev, block, page))
2770 yaffs_trace(YAFFS_TRACE_VERIFY,
2771 "Deleting invalid chunk %d", chunk_id);
2773 bi = yaffs_get_block_info(dev, block);
2775 yaffs2_update_oldest_dirty_seq(dev, block, bi);
2777 yaffs_trace(YAFFS_TRACE_DELETION,
2778 "line %d delete of chunk %d",
2781 if (!dev->param.is_yaffs2 && mark_flash &&
2782 bi->block_state != YAFFS_BLOCK_STATE_COLLECTING) {
2784 memset(&tags, 0, sizeof(tags));
2785 tags.is_deleted = 1;
2786 yaffs_wr_chunk_tags_nand(dev, chunk_id, NULL, &tags);
2787 yaffs_handle_chunk_update(dev, chunk_id, &tags);
2789 dev->n_unmarked_deletions++;
2792 /* Pull out of the management area.
2793 * If the whole block became dirty, this will kick off an erasure.
2795 if (bi->block_state == YAFFS_BLOCK_STATE_ALLOCATING ||
2796 bi->block_state == YAFFS_BLOCK_STATE_FULL ||
2797 bi->block_state == YAFFS_BLOCK_STATE_NEEDS_SCAN ||
2798 bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) {
2799 dev->n_free_chunks++;
2800 yaffs_clear_chunk_bit(dev, block, page);
/* Whole block now empty (and not protected by a shrink header or an
 * in-progress allocation/scan): reclaim it. */
2803 if (bi->pages_in_use == 0 &&
2804 !bi->has_shrink_hdr &&
2805 bi->block_state != YAFFS_BLOCK_STATE_ALLOCATING &&
2806 bi->block_state != YAFFS_BLOCK_STATE_NEEDS_SCAN) {
2807 yaffs_block_became_dirty(dev, block);
/* Write one data chunk of a file.
 * Runs a passive gc check first, finds any previous chunk at this file
 * position (pre-creating the tnode slot if absent), builds tags with a
 * bumped serial number, grows stored_size if the write extends the file,
 * writes the new chunk, patches it into the tnode tree and deletes the
 * superseded chunk.  Returns the new NAND chunk id (negative/0 on
 * failure paths, some of which are on elided lines).
 */
2812 int yaffs_wr_data_obj(struct yaffs_obj *in, int inode_chunk,
2813 const u8 *buffer, int n_bytes, int use_reserve)
2815 /* Find old chunk Need to do this to get serial number
2816 * Write new one and patch into tree.
2817 * Invalidate old tags.
2821 struct yaffs_ext_tags prev_tags;
2823 struct yaffs_ext_tags new_tags;
2824 struct yaffs_dev *dev = in->my_dev;
2827 yaffs_check_gc(dev, 0);
2829 /* Get the previous chunk at this location in the file if it exists.
2830 * If it does not exist then put a zero into the tree. This creates
2831 * the tnode now, rather than later when it is harder to clean up.
2833 prev_chunk_id = yaffs_find_chunk_in_file(in, inode_chunk, &prev_tags);
2834 if (prev_chunk_id < 1 &&
2835 !yaffs_put_chunk_in_file(in, inode_chunk, 0, 0))
2838 /* Set up new tags */
2839 memset(&new_tags, 0, sizeof(new_tags));
2841 new_tags.chunk_id = inode_chunk;
2842 new_tags.obj_id = in->obj_id;
/* Serial number must exceed the superseded chunk's so scanning can
 * tell which copy is current. */
2843 new_tags.serial_number =
2844 (prev_chunk_id > 0) ? prev_tags.serial_number + 1 : 1;
2845 new_tags.n_bytes = n_bytes;
2847 if (n_bytes < 1 || n_bytes > (int)dev->data_bytes_per_chunk) {
2848 yaffs_trace(YAFFS_TRACE_ERROR,
2849 "Writing %d bytes to chunk!!!!!!!!!",
2855 * If this is a data chunk and the write goes past the end of the stored
2856 * size then update the stored_size.
2858 if (inode_chunk > 0) {
2859 endpos = (inode_chunk - 1) * dev->data_bytes_per_chunk +
2861 if (in->variant.file_variant.stored_size < endpos)
2862 in->variant.file_variant.stored_size = endpos;
2866 yaffs_write_new_chunk(dev, buffer, &new_tags, use_reserve);
2868 if (new_chunk_id > 0) {
2869 yaffs_put_chunk_in_file(in, inode_chunk, new_chunk_id, 0);
2871 if (prev_chunk_id > 0)
2872 yaffs_chunk_del(dev, prev_chunk_id, 1, __LINE__);
2874 yaffs_verify_file_sane(in);
2876 return new_chunk_id;
/* Set (set != 0) or delete an extended attribute by rewriting the
 * object header: the request is packed into a yaffs_xattr_mod and passed
 * to yaffs_update_oh(), which applies it via yaffs_apply_xattrib_mod().
 * xmod.result defaults to -ENOSPC and is overwritten by the apply step.
 */
2881 static int yaffs_do_xattrib_mod(struct yaffs_obj *obj, int set,
2882 const YCHAR *name, const void *value, int size,
2885 struct yaffs_xattr_mod xmod;
2893 xmod.result = -ENOSPC;
2895 result = yaffs_update_oh(obj, NULL, 0, 0, 0, &xmod);
/* Apply a pending xattr modification to a header buffer.
 * The xattr area lives in the chunk immediately after the object header;
 * set requests go through nval_set(), deletes through nval_del().  The
 * object's cached has_xattr flag is refreshed and the outcome is stored
 * in xmod->result for the caller of yaffs_do_xattrib_mod().
 */
2903 static int yaffs_apply_xattrib_mod(struct yaffs_obj *obj, char *buffer,
2904 struct yaffs_xattr_mod *xmod)
2907 int x_offs = sizeof(struct yaffs_obj_hdr);
2908 struct yaffs_dev *dev = obj->my_dev;
2909 int x_size = dev->data_bytes_per_chunk - sizeof(struct yaffs_obj_hdr);
2910 char *x_buffer = buffer + x_offs;
2914 nval_set(dev, x_buffer, x_size, xmod->name, xmod->data,
2915 xmod->size, xmod->flags);
2917 retval = nval_del(dev, x_buffer, x_size, xmod->name);
2919 obj->has_xattr = nval_hasvalues(dev, x_buffer, x_size);
2920 obj->xattr_known = 1;
2921 xmod->result = retval;
/* Fetch an xattr value (name != NULL) or list all xattr names
 * (name == NULL) from the object's header chunk.
 * Short-circuits when the object is known to have no xattribs; otherwise
 * reads the header chunk into a temp buffer, refreshes the cached
 * has_xattr flag, and dispatches to nval_get()/nval_list().
 */
2926 static int yaffs_do_xattrib_fetch(struct yaffs_obj *obj, const YCHAR *name,
2927 void *value, int size)
2929 char *buffer = NULL;
2931 struct yaffs_ext_tags tags;
2932 struct yaffs_dev *dev = obj->my_dev;
2933 int x_offs = sizeof(struct yaffs_obj_hdr);
2934 int x_size = dev->data_bytes_per_chunk - sizeof(struct yaffs_obj_hdr);
/* No header chunk on NAND means nothing to read. */
2938 if (obj->hdr_chunk < 1)
2941 /* If we know that the object has no xattribs then don't do all the
2942 * reading and parsing.
2944 if (obj->xattr_known && !obj->has_xattr) {
2951 buffer = (char *)yaffs_get_temp_buffer(dev);
2956 yaffs_rd_chunk_tags_nand(dev, obj->hdr_chunk, (u8 *) buffer, &tags);
2958 if (result != YAFFS_OK)
2961 x_buffer = buffer + x_offs;
2963 if (!obj->xattr_known) {
2964 obj->has_xattr = nval_hasvalues(dev, x_buffer, x_size);
2965 obj->xattr_known = 1;
2969 retval = nval_get(dev, x_buffer, x_size,
2972 retval = nval_list(dev, x_buffer, x_size, value, size);
2974 yaffs_release_temp_buffer(dev, (u8 *) buffer);
/* Public API: set/replace the named extended attribute on obj (set == 1). */
2978 int yaffs_set_xattrib(struct yaffs_obj *obj, const YCHAR * name,
2979 const void *value, int size, int flags)
2981 return yaffs_do_xattrib_mod(obj, 1, name, value, size, flags);
/* Public API: remove the named extended attribute from obj (set == 0). */
2984 int yaffs_remove_xattrib(struct yaffs_obj *obj, const YCHAR * name)
2986 return yaffs_do_xattrib_mod(obj, 0, name, NULL, 0, 0);
/* Public API: read the named extended attribute's value into value/size. */
2989 int yaffs_get_xattrib(struct yaffs_obj *obj, const YCHAR * name, void *value,
2992 return yaffs_do_xattrib_fetch(obj, name, value, size);
/* Public API: list all xattr names into buffer (name == NULL selects list mode). */
2995 int yaffs_list_xattrib(struct yaffs_obj *obj, char *buffer, int size)
2997 return yaffs_do_xattrib_fetch(obj, NULL, buffer, size)
/* Lazily load an object's full details (mode, attributes, name, symlink
 * alias) from its header chunk on NAND.
 * No-op unless the object is marked lazy_loaded and has a header chunk.
 * On success clears lazy_loaded so the read happens at most once.
 */
3000 static void yaffs_check_obj_details_loaded(struct yaffs_obj *in)
3003 struct yaffs_obj_hdr *oh;
3004 struct yaffs_dev *dev;
3005 struct yaffs_ext_tags tags;
/* Nothing to do: invalid object, already loaded, or no header on NAND. */
3008 if (!in || !in->lazy_loaded || in->hdr_chunk < 1)
3012 buf = yaffs_get_temp_buffer(dev);
3014 result = yaffs_rd_chunk_tags_nand(dev, in->hdr_chunk, buf, &tags);
3016 if (result == YAFFS_FAIL)
3019 oh = (struct yaffs_obj_hdr *)buf;
/* Header is stored in on-NAND endianness; convert to CPU order first. */
3021 yaffs_do_endian_oh(dev, oh);
3023 in->lazy_loaded = 0;
3024 in->yst_mode = oh->yst_mode;
3025 yaffs_load_attribs(in, oh);
3026 yaffs_set_obj_name_from_oh(in, oh);
/* Symlinks additionally carry their alias string in the header. */
3028 if (in->variant_type == YAFFS_OBJECT_TYPE_SYMLINK)
3029 in->variant.symlink_variant.alias =
3030 yaffs_clone_str(oh->alias);
3031 yaffs_release_temp_buffer(dev, buf);
3034 /* UpdateObjectHeader updates the header on NAND for an object.
3035 * If name is not NULL, then that new name is used.
3037 * We're always creating the obj header from scratch (except reading
3038 * the old name) so first set up in cpu endianness then run it through
3039 * endian fixing at the end.
3041 * However, a twist: If there are xattribs we leave them as they were.
3043 * Careful! The buffer holds the whole chunk. Part of the chunk holds the
3044 * object header and the rest holds the xattribs, therefore we use a buffer
3045 * pointer and an oh pointer to point to the same memory.
3048 int yaffs_update_oh(struct yaffs_obj *in, const YCHAR *name, int force,
3049 int is_shrink, int shadows, struct yaffs_xattr_mod *xmod)
3052 struct yaffs_block_info *bi;
3053 struct yaffs_dev *dev = in->my_dev;
3058 struct yaffs_ext_tags new_tags;
3059 struct yaffs_ext_tags old_tags;
3060 const YCHAR *alias = NULL;
3062 YCHAR old_name[YAFFS_MAX_NAME_LENGTH + 1];
3063 struct yaffs_obj_hdr *oh = NULL;
3064 loff_t file_size = 0;
/* Placeholder in case the old header can't be read below. */
3066 strcpy(old_name, _Y("silly old name"));
/* Fake objects (except the root dir) have no on-NAND header to update
 * unless explicitly forced or an xattr change needs writing. */
3068 if (in->fake && in != dev->root_dir && !force && !xmod)
/* Opportunistic garbage collection before consuming a chunk. */
3071 yaffs_check_gc(dev, 0);
3072 yaffs_check_obj_details_loaded(in);
/* buffer holds the whole chunk; oh aliases its header portion. */
3074 buffer = yaffs_get_temp_buffer(in->my_dev);
3075 oh = (struct yaffs_obj_hdr *)buffer;
3077 prev_chunk_id = in->hdr_chunk;
3079 if (prev_chunk_id > 0) {
3080 /* Access the old obj header just to read the name. */
3081 result = yaffs_rd_chunk_tags_nand(dev, prev_chunk_id,
3083 if (result == YAFFS_OK) {
3084 yaffs_verify_oh(in, oh, &old_tags, 0);
3085 memcpy(old_name, oh->name, sizeof(oh->name));
3088 * NB We only wipe the object header area because the rest of
3089 * the buffer might contain xattribs.
3091 memset(oh, 0xff, sizeof(*oh));
/* No previous header: wipe the whole chunk (no xattribs to keep). */
3094 memset(buffer, 0xff, dev->data_bytes_per_chunk);
/* Build the new header in CPU endianness (swizzled at the end). */
3097 oh->type = in->variant_type;
3098 oh->yst_mode = in->yst_mode;
3099 oh->shadows_obj = oh->inband_shadowed_obj_id = shadows;
3101 yaffs_load_attribs_oh(oh, in);
3104 oh->parent_obj_id = in->parent->obj_id;
3106 oh->parent_obj_id = 0;
/* Name priority: caller-supplied new name > name from old header >
 * blank. */
3108 if (name && *name) {
3109 memset(oh->name, 0, sizeof(oh->name));
3110 yaffs_load_oh_from_name(dev, oh->name, name);
3111 } else if (prev_chunk_id > 0) {
3112 memcpy(oh->name, old_name, sizeof(oh->name));
3114 memset(oh->name, 0, sizeof(oh->name));
3117 oh->is_shrink = is_shrink;
/* Per-type payload of the header. */
3119 switch (in->variant_type) {
3120 case YAFFS_OBJECT_TYPE_UNKNOWN:
3121 /* Should not happen */
3123 case YAFFS_OBJECT_TYPE_FILE:
/* Deleted/unlinked files record size 0 (file_size initial value). */
3124 if (oh->parent_obj_id != YAFFS_OBJECTID_DELETED &&
3125 oh->parent_obj_id != YAFFS_OBJECTID_UNLINKED)
3126 file_size = in->variant.file_variant.stored_size;
3127 yaffs_oh_size_load(dev, oh, file_size, 0);
3129 case YAFFS_OBJECT_TYPE_HARDLINK:
3130 oh->equiv_id = in->variant.hardlink_variant.equiv_id;
3132 case YAFFS_OBJECT_TYPE_SPECIAL:
3135 case YAFFS_OBJECT_TYPE_DIRECTORY:
3138 case YAFFS_OBJECT_TYPE_SYMLINK:
3139 alias = in->variant.symlink_variant.alias;
3141 alias = _Y("no alias");
3142 strncpy(oh->alias, alias, YAFFS_MAX_ALIAS_LENGTH);
/* strncpy does not guarantee termination; force it here. */
3143 oh->alias[YAFFS_MAX_ALIAS_LENGTH] = 0;
3147 /* process any xattrib modifications */
3149 yaffs_apply_xattrib_mod(in, (char *)buffer, xmod);
/* Build the tags that accompany the header chunk.  chunk_id 0 marks
 * this chunk as an object header rather than file data. */
3152 memset(&new_tags, 0, sizeof(new_tags));
3154 new_tags.chunk_id = 0;
3155 new_tags.obj_id = in->obj_id;
3156 new_tags.serial_number = in->serial;
3158 /* Add extra info for file header */
3159 new_tags.extra_available = 1;
3160 new_tags.extra_parent_id = oh->parent_obj_id;
3161 new_tags.extra_file_size = file_size;
3162 new_tags.extra_is_shrink = oh->is_shrink;
3163 new_tags.extra_equiv_id = oh->equiv_id;
3164 new_tags.extra_shadows = (oh->shadows_obj > 0) ? 1 : 0;
3165 new_tags.extra_obj_type = in->variant_type;
3167 /* Now endian swizzle the oh if needed. */
3168 yaffs_do_endian_oh(dev, oh);
3170 yaffs_verify_oh(in, oh, &new_tags, 1);
3172 /* Create new chunk in NAND */
/* use_reserve only when replacing an existing header (the old chunk
 * will be freed, so space is guaranteed to come back). */
3174 yaffs_write_new_chunk(dev, buffer, &new_tags,
3175 (prev_chunk_id > 0) ? 1 : 0);
3178 yaffs_release_temp_buffer(dev, buffer);
3180 if (new_chunk_id < 0)
3181 return new_chunk_id;
/* Commit: point the object at the new header, then retire the old one. */
3183 in->hdr_chunk = new_chunk_id;
3185 if (prev_chunk_id > 0)
3186 yaffs_chunk_del(dev, prev_chunk_id, 1, __LINE__);
3188 if (!yaffs_obj_cache_dirty(in))
3191 /* If this was a shrink, then mark the block
3192 * that the chunk lives on */
3194 bi = yaffs_get_block_info(in->my_dev,
3196 in->my_dev->param.chunks_per_block);
3197 bi->has_shrink_hdr = 1;
3201 return new_chunk_id;
3204 /*--------------------- File read/write ------------------------
3205 * Read and write have very similar structures.
3206 * In general the read/write has three parts to it
3207 * An incomplete chunk to start with (if the read/write is not chunk-aligned)
3208 * Some complete chunks
3209 * An incomplete chunk to end off with
3211 * Curve-balls: the first chunk might also be the last chunk.
/* Read n_bytes from a file object starting at byte offset into buffer.
 * Proceeds chunk by chunk: partial chunks (and all reads with inband
 * tags) go through the chunk cache or a temp buffer; full aligned
 * chunks are read straight into the caller's buffer.
 * NOTE(review): the surrounding loop, n/n_done bookkeeping and return
 * are elided in this view — presumably returns bytes read; confirm.
 */
3214 int yaffs_file_rd(struct yaffs_obj *in, u8 * buffer, loff_t offset, int n_bytes)
3221 struct yaffs_cache *cache;
3222 struct yaffs_dev *dev;
/* Map the byte offset to a chunk number and an offset within it. */
3227 yaffs_addr_to_chunk(dev, offset, &chunk, &start);
3230 /* OK now check for the curveball where the start and end are in
/* Request fits inside this one chunk ... */
3233 if ((start + n) < dev->data_bytes_per_chunk)
/* ... otherwise copy up to the end of the current chunk. */
3236 n_copy = dev->data_bytes_per_chunk - start;
3238 cache = yaffs_find_chunk_cache(in, chunk);
3240 /* If the chunk is already in the cache or it is less than
3241 * a whole chunk or we're using inband tags then use the cache
3242 * (if there is caching) else bypass the cache.
3244 if (cache || n_copy != (int)dev->data_bytes_per_chunk ||
3245 dev->param.inband_tags) {
3246 if (dev->param.n_caches > 0) {
3248 /* If we can't find the data in the cache,
3249 * then load it up. */
3253 yaffs_grab_chunk_cache(in->my_dev);
3255 cache->chunk_id = chunk;
3258 yaffs_rd_data_obj(in, chunk,
/* Mark the cache entry as recently used (read, not dirty). */
3263 yaffs_use_cache(dev, cache, 0);
3267 memcpy(buffer, &cache->data[start], n_copy);
/* No caching configured: */
3271 /* Read into the local buffer then copy.. */
3274 yaffs_get_temp_buffer(dev);
3275 yaffs_rd_data_obj(in, chunk, local_buffer);
3277 memcpy(buffer, &local_buffer[start], n_copy);
3279 yaffs_release_temp_buffer(dev, local_buffer);
3282 /* A full chunk. Read directly into the buffer. */
3283 yaffs_rd_data_obj(in, chunk, buffer);
3293 int yaffs_do_file_wr(struct yaffs_obj *in, const u8 *buffer, loff_t offset,
3294 int n_bytes, int write_through)
3303 loff_t start_write = offset;
3304 int chunk_written = 0;
3307 struct yaffs_dev *dev;
3311 while (n > 0 && chunk_written >= 0) {
3312 yaffs_addr_to_chunk(dev, offset, &chunk, &start);
3314 if (((loff_t)chunk) *
3315 dev->data_bytes_per_chunk + start != offset ||
3316 start >= dev->data_bytes_per_chunk) {
3317 yaffs_trace(YAFFS_TRACE_ERROR,
3318 "AddrToChunk of offset %lld gives chunk %d start %d",
3319 (long long)offset, chunk, start);
3321 chunk++; /* File pos to chunk in file offset */
3323 /* OK now check for the curveball where the start and end are in
3327 if ((start + n) < dev->data_bytes_per_chunk) {
3330 /* Now calculate how many bytes to write back....
3331 * If we're overwriting and not writing to the end of
3332 * file then we need to write back as much as was there
3336 chunk_start = (((loff_t)(chunk - 1)) *
3337 dev->data_bytes_per_chunk);
3339 if (chunk_start > in->variant.file_variant.file_size)
3340 n_bytes_read = 0; /* Past end of file */
3343 in->variant.file_variant.file_size -
3346 if (n_bytes_read > dev->data_bytes_per_chunk)
3347 n_bytes_read = dev->data_bytes_per_chunk;
3351 (start + n)) ? n_bytes_read : (start + n);
3353 if (n_writeback < 0 ||
3354 n_writeback > (int)dev->data_bytes_per_chunk)
3358 n_copy = dev->data_bytes_per_chunk - start;
3359 n_writeback = dev->data_bytes_per_chunk;
3362 if (n_copy != (int)dev->data_bytes_per_chunk ||
3363 !dev->param.cache_bypass_aligned ||
3364 dev->param.inband_tags) {
3365 /* An incomplete start or end chunk (or maybe both
3366 * start and end chunk), or we're using inband tags,
3367 * or we're forcing writes through the cache,
3368 * so we want to use the cache buffers.
3370 if (dev->param.n_caches > 0) {
3371 struct yaffs_cache *cache;
3373 /* If we can't find the data in the cache, then
3375 cache = yaffs_find_chunk_cache(in, chunk);
3378 yaffs_check_alloc_available(dev, 1)) {
3379 cache = yaffs_grab_chunk_cache(dev);
3381 cache->chunk_id = chunk;
3384 yaffs_rd_data_obj(in, chunk,
3388 !yaffs_check_alloc_available(dev,