+ if (yaffs_create_obj
+ (YAFFS_OBJECT_TYPE_HARDLINK, parent, name, 0, 0, 0,
+ equiv_obj, NULL, 0)) {
+ return equiv_obj;
+ } else {
+ return NULL;
+ }
+
+}
+
+
+
+/*------------------------- Block Management and Page Allocation ----------------*/
+
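+/* Allocate the per-block bookkeeping (block_info array and chunk-in-use
+ * bitmap). kmalloc is tried first; if it fails, fall back to vmalloc and
+ * record which allocator was used so deinit can free with the right call.
+ */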
+static int yaffs_init_blocks(struct yaffs_dev *dev)
+{
+ int n_blocks = dev->internal_end_block - dev->internal_start_block + 1;
+
+ dev->block_info = NULL;
+ dev->chunk_bits = NULL;
+
+ dev->alloc_block = -1; /* force it to get a new one */
+
+ /* If the first allocation strategy fails, try the alternate one */
+ dev->block_info =
+ kmalloc(n_blocks * sizeof(struct yaffs_block_info), GFP_NOFS);
+ if (!dev->block_info) {
+ dev->block_info =
+ vmalloc(n_blocks * sizeof(struct yaffs_block_info));
+ dev->block_info_alt = 1;
+ } else {
+ dev->block_info_alt = 0;
+ }
+
+ if (dev->block_info) {
+ /* Set up the dynamic block info: the chunk bitmap stride is chunks per block rounded up to whole bytes. */
+ dev->chunk_bit_stride = (dev->param.chunks_per_block + 7) / 8;
+ dev->chunk_bits =
+ kmalloc(dev->chunk_bit_stride * n_blocks, GFP_NOFS);
+ if (!dev->chunk_bits) {
+ dev->chunk_bits =
+ vmalloc(dev->chunk_bit_stride * n_blocks);
+ dev->chunk_bits_alt = 1;
+ } else {
+ dev->chunk_bits_alt = 0;
+ }
+ }
+
+ if (dev->block_info && dev->chunk_bits) {
+ memset(dev->block_info, 0,
+ n_blocks * sizeof(struct yaffs_block_info));
+ memset(dev->chunk_bits, 0, dev->chunk_bit_stride * n_blocks);
+ return YAFFS_OK;
+ }
+
+ return YAFFS_FAIL;
+}
+
+static void yaffs_deinit_blocks(struct yaffs_dev *dev)
+{
+ if (dev->block_info_alt && dev->block_info)
+ vfree(dev->block_info);
+ else if (dev->block_info)
+ kfree(dev->block_info);
+
+ dev->block_info_alt = 0;
+
+ dev->block_info = NULL;
+
+ if (dev->chunk_bits_alt && dev->chunk_bits)
+ vfree(dev->chunk_bits);
+ else if (dev->chunk_bits)
+ kfree(dev->chunk_bits);
+ dev->chunk_bits_alt = 0;
+ dev->chunk_bits = NULL;
+}
+
+void yaffs_block_became_dirty(struct yaffs_dev *dev, int block_no)
+{
+ struct yaffs_block_info *bi = yaffs_get_block_info(dev, block_no);
+
+ int erased_ok = 0;
+
+ /* If the block is still healthy erase it and mark as clean.
+ * If the block has had a data failure, then retire it.
+ */
+
+ T(YAFFS_TRACE_GC | YAFFS_TRACE_ERASE,
+ (TSTR("yaffs_block_became_dirty block %d state %d %s" TENDSTR),
+ block_no, bi->block_state,
+ (bi->needs_retiring) ? "needs retiring" : ""));
+
+ yaffs2_clear_oldest_dirty_seq(dev, bi);
+
+ bi->block_state = YAFFS_BLOCK_STATE_DIRTY;
+
+ /* If this is the block being garbage collected then stop gc'ing this block */
+ if (block_no == dev->gc_block)
+ dev->gc_block = 0;
+
+ /* If this block is currently the best candidate for gc then drop as a candidate */
+ if (block_no == dev->gc_dirtiest) {
+ dev->gc_dirtiest = 0;
+ dev->gc_pages_in_use = 0;
+ }
+
+ if (!bi->needs_retiring) {
+ yaffs2_checkpt_invalidate(dev);
+ erased_ok = yaffs_erase_block(dev, block_no);
+ if (!erased_ok) {
+ dev->n_erase_failures++;
+ T(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
+ (TSTR("**>> Erasure failed %d" TENDSTR), block_no));
+ }
+ }
+
+ if (erased_ok &&
+ ((yaffs_trace_mask & YAFFS_TRACE_ERASE)
+ || !yaffs_skip_verification(dev))) {
+ int i;
+ for (i = 0; i < dev->param.chunks_per_block; i++) {
+ if (!yaffs_check_chunk_erased
+ (dev, block_no * dev->param.chunks_per_block + i)) {
+ T(YAFFS_TRACE_ERROR,
+ (TSTR
+ (">>Block %d erasure supposedly OK, but chunk %d not erased"
+ TENDSTR), block_no, i));
+ }
+ }
+ }
+
+ if (erased_ok) {
+ /* Clean it up... */
+ bi->block_state = YAFFS_BLOCK_STATE_EMPTY;
+ bi->seq_number = 0;
+ dev->n_erased_blocks++;
+ bi->pages_in_use = 0;
+ bi->soft_del_pages = 0;
+ bi->has_shrink_hdr = 0;
+ bi->skip_erased_check = 1; /* This is clean, so no need to check */
+ bi->gc_prioritise = 0;
+ yaffs_clear_chunk_bits(dev, block_no);
+
+ T(YAFFS_TRACE_ERASE,
+ (TSTR("Erased block %d" TENDSTR), block_no));
+ } else {
+ dev->n_free_chunks -= dev->param.chunks_per_block; /* We lost a block of free space */
+
+ yaffs_retire_block(dev, block_no);
+ T(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
+ (TSTR("**>> Block %d retired" TENDSTR), block_no));
+ }
+}
+
+
+
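+/* Garbage collect one block: chunks still in use are copied off to fresh
+ * locations, after which the block is marked dirty so it can be erased
+ * and returned to the free pool.
+ */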
+static int yaffs_gc_block(struct yaffs_dev *dev, int block, int whole_block)
+{
+ int old_chunk;
+ int new_chunk;
+ int mark_flash;
+ int ret_val = YAFFS_OK;
+ int i;
+ int is_checkpt_block;
+ int matching_chunk;
+ int max_copies;
+
+ int chunks_before = yaffs_get_erased_chunks(dev);
+ int chunks_after;
+
+ struct yaffs_ext_tags tags;
+
+ struct yaffs_block_info *bi = yaffs_get_block_info(dev, block);
+
+ struct yaffs_obj *object;
+
+ is_checkpt_block = (bi->block_state == YAFFS_BLOCK_STATE_CHECKPOINT);
+
+ T(YAFFS_TRACE_TRACING,
+ (TSTR
+ ("Collecting block %d, in use %d, shrink %d, whole_block %d"
+ TENDSTR), block, bi->pages_in_use, bi->has_shrink_hdr,
+ whole_block));
+
+ /*yaffs_verify_free_chunks(dev); */
+
+ if (bi->block_state == YAFFS_BLOCK_STATE_FULL)
+ bi->block_state = YAFFS_BLOCK_STATE_COLLECTING;
+
+ bi->has_shrink_hdr = 0; /* clear the flag so that the block can be erased */
+
+ dev->gc_disable = 1;
+
+ if (is_checkpt_block || !yaffs_still_some_chunks(dev, block)) {
+ T(YAFFS_TRACE_TRACING,
+ (TSTR
+ ("Collecting block %d that has no chunks in use" TENDSTR),
+ block));
+ yaffs_block_became_dirty(dev, block);
+ } else {
+
+ u8 *buffer = yaffs_get_temp_buffer(dev, __LINE__);
+
+ yaffs_verify_blk(dev, bi, block);
+
+ max_copies = (whole_block) ? dev->param.chunks_per_block : 5;
+ old_chunk = block * dev->param.chunks_per_block + dev->gc_chunk;
+
+ for ( /* init already done */ ;
+ ret_val == YAFFS_OK &&
+ dev->gc_chunk < dev->param.chunks_per_block &&
+ (bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) &&
+ max_copies > 0; dev->gc_chunk++, old_chunk++) {
+ if (yaffs_check_chunk_bit(dev, block, dev->gc_chunk)) {
+
+ /* This page is in use and might need to be copied off */
+
+ max_copies--;
+
+ mark_flash = 1;
+
+ yaffs_init_tags(&tags);
+
+ yaffs_rd_chunk_tags_nand(dev, old_chunk,
+ buffer, &tags);
+
+ object = yaffs_find_by_number(dev, tags.obj_id);
+
+ T(YAFFS_TRACE_GC_DETAIL,
+ (TSTR
+ ("Collecting chunk in block %d, %d %d %d "