mi_check.c
Uploaded by: romrleung
Upload date: 2022-05-23
Package size: 18897k
File size: 134k
- if ((!(param->testflag & T_SILENT)))
- printf ("- Fixing index %dn",sort_param.key+1);
- sort_param.max_pos=sort_param.pos=share->pack.header_length;
- keyseg=sort_param.seg;
- bzero((char*) sort_param.unique,sizeof(sort_param.unique));
- sort_param.key_length=share->rec_reflength;
- for (i=0 ; keyseg[i].type != HA_KEYTYPE_END; i++)
- {
- sort_param.key_length+=keyseg[i].length;
- if (keyseg[i].flag & HA_SPACE_PACK)
- sort_param.key_length+=get_pack_length(keyseg[i].length);
- if (keyseg[i].flag & (HA_BLOB_PART | HA_VAR_LENGTH))
- sort_param.key_length+=2 + test(keyseg[i].length >= 127);
- if (keyseg[i].flag & HA_NULL_PART)
- sort_param.key_length++;
- }
- info->state->records=info->state->del=share->state.split=0;
- info->state->empty=0;
- if (sort_param.keyinfo->flag & HA_FULLTEXT)
- {
- uint ft_max_word_len_for_sort=FT_MAX_WORD_LEN_FOR_SORT*
- sort_param.keyinfo->seg->charset->mbmaxlen;
- sort_info.max_records=
- (ha_rows) (sort_info.filelength/ft_min_word_len+1);
- sort_param.key_read=sort_ft_key_read;
- sort_param.key_write=sort_ft_key_write;
- sort_param.key_length+=ft_max_word_len_for_sort-HA_FT_MAXBYTELEN;
- }
- else
- {
- sort_param.key_read=sort_key_read;
- sort_param.key_write=sort_key_write;
- }
- if (_create_index_by_sort(&sort_param,
- (my_bool) (!(param->testflag & T_VERBOSE)),
- (uint) param->sort_buffer_length))
- {
- param->retry_repair=1;
- goto err;
- }
- param->calc_checksum=0; /* No need to calc glob_crc */
- /* Set for next loop */
- sort_info.max_records= (ha_rows) info->state->records;
- if (param->testflag & T_STATISTICS)
- update_key_parts(sort_param.keyinfo, rec_per_key_part, sort_param.unique,
- param->stats_method == MI_STATS_METHOD_IGNORE_NULLS?
- sort_param.notnull: NULL,(ulonglong) info->state->records);
- share->state.key_map|=(ulonglong) 1 << sort_param.key;
- if (sort_param.fix_datafile)
- {
- param->read_cache.end_of_file=sort_param.filepos;
- if (write_data_suffix(&sort_info,1) || end_io_cache(&info->rec_cache))
- goto err;
- if (param->testflag & T_SAFE_REPAIR)
- {
- /* Don't repair if we lost more than one row */
- if (info->state->records+1 < start_records)
- {
- info->state->records=start_records;
- goto err;
- }
- }
- share->state.state.data_file_length = info->state->data_file_length=
- sort_param.filepos;
- /* Only whole records */
- share->state.version=(ulong) time((time_t*) 0);
- my_close(info->dfile,MYF(0));
- info->dfile=new_file;
- share->data_file_type=sort_info.new_data_file_type;
- share->pack.header_length=(ulong) new_header_length;
- sort_param.fix_datafile=0;
- }
- else
- info->state->data_file_length=sort_param.max_pos;
- param->read_cache.file=info->dfile; /* re-init read cache */
- reinit_io_cache(&param->read_cache,READ_CACHE,share->pack.header_length,
- 1,1);
- }
- if (param->testflag & T_WRITE_LOOP)
- {
- VOID(fputs(" r",stdout)); VOID(fflush(stdout));
- }
- if (rep_quick && del+sort_info.dupp != info->state->del)
- {
- mi_check_print_error(param,"Couldn't fix table with quick recovery: Found wrong number of deleted records");
- mi_check_print_error(param,"Run recovery again without -q");
- got_error=1;
- param->retry_repair=1;
- param->testflag|=T_RETRY_WITHOUT_QUICK;
- goto err;
- }
- if (rep_quick & T_FORCE_UNIQUENESS)
- {
- my_off_t skr=info->state->data_file_length+
- (share->options & HA_OPTION_COMPRESS_RECORD ?
- MEMMAP_EXTRA_MARGIN : 0);
- #ifdef USE_RELOC
- if (share->data_file_type == STATIC_RECORD &&
- skr < share->base.reloc*share->base.min_pack_length)
- skr=share->base.reloc*share->base.min_pack_length;
- #endif
- if (skr != sort_info.filelength && !info->s->base.raid_type)
- if (my_chsize(info->dfile,skr,0,MYF(0)))
- mi_check_print_warning(param,
- "Can't change size of datafile, error: %d",
- my_errno);
- }
- if (param->testflag & T_CALC_CHECKSUM)
- share->state.checksum=param->glob_crc;
- if (my_chsize(share->kfile,info->state->key_file_length,0,MYF(0)))
- mi_check_print_warning(param,
- "Can't change size of indexfile, error: %d",
- my_errno);
- if (!(param->testflag & T_SILENT))
- {
- if (start_records != info->state->records)
- printf("Data records: %sn", llstr(info->state->records,llbuff));
- if (sort_info.dupp)
- mi_check_print_warning(param,
- "%s records have been removed",
- llstr(sort_info.dupp,llbuff));
- }
- got_error=0;
- if (&share->state.state != info->state)
- memcpy( &share->state.state, info->state, sizeof(*info->state));
- err:
- got_error|= flush_blocks(param, share->key_cache, share->kfile);
- VOID(end_io_cache(&info->rec_cache));
- if (!got_error)
- {
- /* Replace the actual file with the temporary file */
- if (new_file >= 0)
- {
- my_close(new_file,MYF(0));
- info->dfile=new_file= -1;
- if (change_to_newfile(share->data_file_name,MI_NAME_DEXT,
- DATA_TMP_EXT, share->base.raid_chunks,
- (param->testflag & T_BACKUP_DATA ?
- MYF(MY_REDEL_MAKE_BACKUP): MYF(0))) ||
- mi_open_datafile(info,share,-1))
- got_error=1;
- }
- }
- if (got_error)
- {
- if (! param->error_printed)
- mi_check_print_error(param,"%d when fixing table",my_errno);
- if (new_file >= 0)
- {
- VOID(my_close(new_file,MYF(0)));
- VOID(my_raid_delete(param->temp_filename,share->base.raid_chunks,
- MYF(MY_WME)));
- if (info->dfile == new_file)
- info->dfile= -1;
- }
- mi_mark_crashed_on_repair(info);
- }
- else if (key_map == share->state.key_map)
- share->state.changed&= ~STATE_NOT_OPTIMIZED_KEYS;
- share->state.changed|=STATE_NOT_SORTED_PAGES;
- my_free(mi_get_rec_buff_ptr(info, sort_param.rec_buff),
- MYF(MY_ALLOW_ZERO_PTR));
- my_free(sort_param.record,MYF(MY_ALLOW_ZERO_PTR));
- my_free((gptr) sort_info.key_block,MYF(MY_ALLOW_ZERO_PTR));
- my_free((gptr) sort_info.ft_buf, MYF(MY_ALLOW_ZERO_PTR));
- my_free(sort_info.buff,MYF(MY_ALLOW_ZERO_PTR));
- VOID(end_io_cache(&param->read_cache));
- info->opt_flag&= ~(READ_CACHE_USED | WRITE_CACHE_USED);
- if (!got_error && (param->testflag & T_UNPACK))
- {
- share->state.header.options[0]&= (uchar) ~HA_OPTION_COMPRESS_RECORD;
- share->pack.header_length=0;
- }
- DBUG_RETURN(got_error);
- }
- /*
- Threaded repair of table using sorting
- SYNOPSIS
- mi_repair_parallel()
- param Repair parameters
- info MyISAM handler to repair
- name Name of table (for warnings)
- rep_quick set to <> 0 if we should not change data file
- DESCRIPTION
- Same as mi_repair_by_sort, but done multithreaded.
- Each key is handled by a separate thread.
- TODO: make the number of threads a parameter
- RESULT
- 0 ok
- <>0 Error
- */
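- /*
-   Illustrative sketch only (not from the original file): roughly how a
-   caller such as myisamchk might drive mi_repair_parallel() below. The
-   function name example_parallel_repair and the exact open flags are
-   assumptions; real callers also fill in fields such as check.tmpdir and
-   the various buffer sizes, and handle errors more carefully.
- */
- #if 0
- static int example_parallel_repair(const char *table_name)
- {
-   MI_CHECK check;
-   MI_INFO *file;
-   int error;
-   myisamchk_init(&check);                     /* default MI_CHECK settings */
-   if (!(file= mi_open(table_name, O_RDWR, HA_OPEN_WAIT_IF_LOCKED)))
-     return my_errno;
-   /* rep_quick=0: the data file may be rewritten, not only the index */
-   error= mi_repair_parallel(&check, file, table_name, 0);
-   mi_close(file);
-   return error;
- }
- #endif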
- int mi_repair_parallel(MI_CHECK *param, register MI_INFO *info,
- const char * name, int rep_quick)
- {
- #ifndef THREAD
- return mi_repair_by_sort(param, info, name, rep_quick);
- #else
- int got_error;
- uint i,key, total_key_length, istep;
- ulong rec_length;
- ha_rows start_records;
- my_off_t new_header_length,del;
- File new_file;
- MI_SORT_PARAM *sort_param=0;
- MYISAM_SHARE *share=info->s;
- ulong *rec_per_key_part;
- HA_KEYSEG *keyseg;
- char llbuff[22];
- IO_CACHE_SHARE io_share;
- SORT_INFO sort_info;
- ulonglong key_map=share->state.key_map;
- pthread_attr_t thr_attr;
- DBUG_ENTER("mi_repair_parallel");
- start_records=info->state->records;
- got_error=1;
- new_file= -1;
- new_header_length=(param->testflag & T_UNPACK) ? 0 :
- share->pack.header_length;
- if (!(param->testflag & T_SILENT))
- {
- printf("- parallel recovering (with sort) MyISAM-table '%s'n",name);
- printf("Data records: %sn", llstr(start_records,llbuff));
- }
- param->testflag|=T_REP; /* for easy checking */
- if (info->s->options & (HA_OPTION_CHECKSUM | HA_OPTION_COMPRESS_RECORD))
- param->testflag|=T_CALC_CHECKSUM;
- bzero((char*)&sort_info,sizeof(sort_info));
- if (!(sort_info.key_block=
- alloc_key_blocks(param,
- (uint) param->sort_key_blocks,
- share->base.max_key_block_length))
- || init_io_cache(&param->read_cache,info->dfile,
- (uint) param->read_buffer_length,
- READ_CACHE,share->pack.header_length,1,MYF(MY_WME)) ||
- (! rep_quick &&
- init_io_cache(&info->rec_cache,info->dfile,
- (uint) param->write_buffer_length,
- WRITE_CACHE,new_header_length,1,
- MYF(MY_WME | MY_WAIT_IF_FULL) & param->myf_rw)))
- goto err;
- sort_info.key_block_end=sort_info.key_block+param->sort_key_blocks;
- info->opt_flag|=WRITE_CACHE_USED;
- info->rec_cache.file=info->dfile; /* for sort_delete_record */
- if (!rep_quick)
- {
- /* Get real path for data file */
- if ((new_file=my_raid_create(fn_format(param->temp_filename,
- share->data_file_name, "",
- DATA_TMP_EXT,
- 2+4),
- 0,param->tmpfile_createflag,
- share->base.raid_type,
- share->base.raid_chunks,
- share->base.raid_chunksize,
- MYF(0))) < 0)
- {
- mi_check_print_error(param,"Can't create new tempfile: '%s'",
- param->temp_filename);
- goto err;
- }
- if (filecopy(param, new_file,info->dfile,0L,new_header_length,
- "datafile-header"))
- goto err;
- if (param->testflag & T_UNPACK)
- {
- share->options&= ~HA_OPTION_COMPRESS_RECORD;
- mi_int2store(share->state.header.options,share->options);
- }
- share->state.dellink= HA_OFFSET_ERROR;
- info->rec_cache.file=new_file;
- }
- info->update= (short) (HA_STATE_CHANGED | HA_STATE_ROW_CHANGED);
- if (!(param->testflag & T_CREATE_MISSING_KEYS))
- {
- /*
- Flush key cache for this file if we are calling this outside
- myisamchk
- */
- flush_key_blocks(share->key_cache,share->kfile, FLUSH_IGNORE_CHANGED);
- /* Clear the pointers to the given rows */
- for (i=0 ; i < share->base.keys ; i++)
- share->state.key_root[i]= HA_OFFSET_ERROR;
- for (i=0 ; i < share->state.header.max_block_size ; i++)
- share->state.key_del[i]= HA_OFFSET_ERROR;
- info->state->key_file_length=share->base.keystart;
- }
- else
- {
- if (flush_key_blocks(share->key_cache,share->kfile, FLUSH_FORCE_WRITE))
- goto err;
- key_map= ~key_map; /* Create the missing keys */
- }
- sort_info.info=info;
- sort_info.param = param;
- set_data_file_type(&sort_info, share);
- sort_info.dupp=0;
- sort_info.buff=0;
- param->read_cache.end_of_file=sort_info.filelength=
- my_seek(param->read_cache.file,0L,MY_SEEK_END,MYF(0));
- if (share->data_file_type == DYNAMIC_RECORD)
- rec_length=max(share->base.min_pack_length+1,share->base.min_block_length);
- else if (share->data_file_type == COMPRESSED_RECORD)
- rec_length=share->base.min_block_length;
- else
- rec_length=share->base.pack_reclength;
- /*
- +1 below is required hack for parallel repair mode.
- The info->state->records value, that is compared later
- to sort_info.max_records and cannot exceed it, is
- increased in sort_key_write. In mi_repair_by_sort, sort_key_write
- is called after sort_key_read, where the comparison is performed,
- but in parallel mode master thread can call sort_key_write
- before some other repair thread calls sort_key_read.
- Furthermore, I'm not even sure +1 would be enough.
- Maybe sort_info.max_records should always be set to the maximum value
- in parallel mode.
- */
- sort_info.max_records=
- ((param->testflag & T_CREATE_MISSING_KEYS) ? info->state->records + 1:
- (ha_rows) (sort_info.filelength/rec_length+1));
- del=info->state->del;
- param->glob_crc=0;
- if (param->testflag & T_CALC_CHECKSUM)
- param->calc_checksum=1;
- if (!(sort_param=(MI_SORT_PARAM *)
- my_malloc((uint) share->base.keys *
- (sizeof(MI_SORT_PARAM) + share->base.pack_reclength),
- MYF(MY_ZEROFILL))))
- {
- mi_check_print_error(param,"Not enough memory for key!");
- goto err;
- }
- total_key_length=0;
- rec_per_key_part= param->rec_per_key_part;
- info->state->records=info->state->del=share->state.split=0;
- info->state->empty=0;
- for (i=key=0, istep=1 ; key < share->base.keys ;
- rec_per_key_part+=sort_param[i].keyinfo->keysegs, i+=istep, key++)
- {
- sort_param[i].key=key;
- sort_param[i].keyinfo=share->keyinfo+key;
- sort_param[i].seg=sort_param[i].keyinfo->seg;
- if (!(((ulonglong) 1 << key) & key_map))
- {
- /* Remember old statistics for key */
- memcpy((char*) rec_per_key_part,
- (char*) (share->state.rec_per_key_part+
- (uint) (rec_per_key_part - param->rec_per_key_part)),
- sort_param[i].keyinfo->keysegs*sizeof(*rec_per_key_part));
- istep=0;
- continue;
- }
- istep=1;
- if ((!(param->testflag & T_SILENT)))
- printf ("- Fixing index %dn",key+1);
- if (sort_param[i].keyinfo->flag & HA_FULLTEXT)
- {
- sort_param[i].key_read=sort_ft_key_read;
- sort_param[i].key_write=sort_ft_key_write;
- }
- else
- {
- sort_param[i].key_read=sort_key_read;
- sort_param[i].key_write=sort_key_write;
- }
- sort_param[i].key_cmp=sort_key_cmp;
- sort_param[i].lock_in_memory=lock_memory;
- sort_param[i].tmpdir=param->tmpdir;
- sort_param[i].sort_info=&sort_info;
- sort_param[i].master=0;
- sort_param[i].fix_datafile=0;
- sort_param[i].filepos=new_header_length;
- sort_param[i].max_pos=sort_param[i].pos=share->pack.header_length;
- sort_param[i].record= (((char *)(sort_param+share->base.keys))+
- (share->base.pack_reclength * i));
- if (!mi_alloc_rec_buff(info, -1, &sort_param[i].rec_buff))
- {
- mi_check_print_error(param,"Not enough memory!");
- goto err;
- }
- sort_param[i].key_length=share->rec_reflength;
- for (keyseg=sort_param[i].seg; keyseg->type != HA_KEYTYPE_END;
- keyseg++)
- {
- sort_param[i].key_length+=keyseg->length;
- if (keyseg->flag & HA_SPACE_PACK)
- sort_param[i].key_length+=get_pack_length(keyseg->length);
- if (keyseg->flag & (HA_BLOB_PART | HA_VAR_LENGTH))
- sort_param[i].key_length+=2 + test(keyseg->length >= 127);
- if (keyseg->flag & HA_NULL_PART)
- sort_param[i].key_length++;
- }
- total_key_length+=sort_param[i].key_length;
- if (sort_param[i].keyinfo->flag & HA_FULLTEXT)
- {
- uint ft_max_word_len_for_sort=FT_MAX_WORD_LEN_FOR_SORT*
- sort_param[i].keyinfo->seg->charset->mbmaxlen;
- sort_param[i].key_length+=ft_max_word_len_for_sort-HA_FT_MAXBYTELEN;
- }
- }
- sort_info.total_keys=i;
- sort_param[0].master= 1;
- sort_param[0].fix_datafile= (my_bool)(! rep_quick);
- sort_info.got_error=0;
- pthread_mutex_init(&sort_info.mutex, MY_MUTEX_INIT_FAST);
- pthread_cond_init(&sort_info.cond, 0);
- pthread_mutex_lock(&sort_info.mutex);
- init_io_cache_share(&param->read_cache, &io_share, i);
- (void) pthread_attr_init(&thr_attr);
- (void) pthread_attr_setdetachstate(&thr_attr,PTHREAD_CREATE_DETACHED);
- for (i=0 ; i < sort_info.total_keys ; i++)
- {
- sort_param[i].read_cache=param->read_cache;
- /*
- two approaches: the same amount of memory for each thread
- or the memory for the same number of keys for each thread...
- In the second one all the threads will fill their sort_buffers
- (and call write_keys) at the same time, putting more stress on i/o.
- */
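- /*
-   E.g. with sort_buffer_length=16M and four keys, the first approach gives
-   every thread 4M, while the second would give a key that accounts for half
-   of total_key_length 8M and a short key correspondingly less.
- */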
- sort_param[i].sortbuff_size=
- #ifndef USING_SECOND_APPROACH
- param->sort_buffer_length/sort_info.total_keys;
- #else
- param->sort_buffer_length*sort_param[i].key_length/total_key_length;
- #endif
- if (pthread_create(&sort_param[i].thr, &thr_attr,
- thr_find_all_keys,
- (void *) (sort_param+i)))
- {
- mi_check_print_error(param,"Cannot start a repair thread");
- remove_io_thread(&param->read_cache);
- sort_info.got_error=1;
- }
- else
- sort_info.threads_running++;
- }
- (void) pthread_attr_destroy(&thr_attr);
- /* waiting for all threads to finish */
- while (sort_info.threads_running)
- pthread_cond_wait(&sort_info.cond, &sort_info.mutex);
- pthread_mutex_unlock(&sort_info.mutex);
- if ((got_error= thr_write_keys(sort_param)))
- {
- param->retry_repair=1;
- goto err;
- }
- got_error=1; /* Assume the following may go wrong */
- if (sort_param[0].fix_datafile)
- {
- if (write_data_suffix(&sort_info,1) || end_io_cache(&info->rec_cache))
- goto err;
- if (param->testflag & T_SAFE_REPAIR)
- {
- /* Don't repair if we lost more than one row */
- if (info->state->records+1 < start_records)
- {
- info->state->records=start_records;
- goto err;
- }
- }
- share->state.state.data_file_length= info->state->data_file_length=
- sort_param->filepos;
- /* Only whole records */
- share->state.version=(ulong) time((time_t*) 0);
- my_close(info->dfile,MYF(0));
- info->dfile=new_file;
- share->data_file_type=sort_info.new_data_file_type;
- share->pack.header_length=(ulong) new_header_length;
- }
- else
- info->state->data_file_length=sort_param->max_pos;
- if (rep_quick && del+sort_info.dupp != info->state->del)
- {
- mi_check_print_error(param,"Couldn't fix table with quick recovery: Found wrong number of deleted records");
- mi_check_print_error(param,"Run recovery again without -q");
- param->retry_repair=1;
- param->testflag|=T_RETRY_WITHOUT_QUICK;
- goto err;
- }
- if (rep_quick & T_FORCE_UNIQUENESS)
- {
- my_off_t skr=info->state->data_file_length+
- (share->options & HA_OPTION_COMPRESS_RECORD ?
- MEMMAP_EXTRA_MARGIN : 0);
- #ifdef USE_RELOC
- if (share->data_file_type == STATIC_RECORD &&
- skr < share->base.reloc*share->base.min_pack_length)
- skr=share->base.reloc*share->base.min_pack_length;
- #endif
- if (skr != sort_info.filelength && !info->s->base.raid_type)
- if (my_chsize(info->dfile,skr,0,MYF(0)))
- mi_check_print_warning(param,
- "Can't change size of datafile, error: %d",
- my_errno);
- }
- if (param->testflag & T_CALC_CHECKSUM)
- share->state.checksum=param->glob_crc;
- if (my_chsize(share->kfile,info->state->key_file_length,0,MYF(0)))
- mi_check_print_warning(param,
- "Can't change size of indexfile, error: %d", my_errno);
- if (!(param->testflag & T_SILENT))
- {
- if (start_records != info->state->records)
- printf("Data records: %sn", llstr(info->state->records,llbuff));
- if (sort_info.dupp)
- mi_check_print_warning(param,
- "%s records have been removed",
- llstr(sort_info.dupp,llbuff));
- }
- got_error=0;
- if (&share->state.state != info->state)
- memcpy(&share->state.state, info->state, sizeof(*info->state));
- err:
- got_error|= flush_blocks(param, share->key_cache, share->kfile);
- VOID(end_io_cache(&info->rec_cache));
- if (!got_error)
- {
- /* Replace the actual file with the temporary file */
- if (new_file >= 0)
- {
- my_close(new_file,MYF(0));
- info->dfile=new_file= -1;
- if (change_to_newfile(share->data_file_name,MI_NAME_DEXT,
- DATA_TMP_EXT, share->base.raid_chunks,
- (param->testflag & T_BACKUP_DATA ?
- MYF(MY_REDEL_MAKE_BACKUP): MYF(0))) ||
- mi_open_datafile(info,share,-1))
- got_error=1;
- }
- }
- if (got_error)
- {
- if (! param->error_printed)
- mi_check_print_error(param,"%d when fixing table",my_errno);
- if (new_file >= 0)
- {
- VOID(my_close(new_file,MYF(0)));
- VOID(my_raid_delete(param->temp_filename,share->base.raid_chunks,
- MYF(MY_WME)));
- if (info->dfile == new_file)
- info->dfile= -1;
- }
- mi_mark_crashed_on_repair(info);
- }
- else if (key_map == share->state.key_map)
- share->state.changed&= ~STATE_NOT_OPTIMIZED_KEYS;
- share->state.changed|=STATE_NOT_SORTED_PAGES;
- pthread_cond_destroy (&sort_info.cond);
- pthread_mutex_destroy(&sort_info.mutex);
- my_free((gptr) sort_info.ft_buf, MYF(MY_ALLOW_ZERO_PTR));
- my_free((gptr) sort_info.key_block,MYF(MY_ALLOW_ZERO_PTR));
- my_free((gptr) sort_param,MYF(MY_ALLOW_ZERO_PTR));
- my_free(sort_info.buff,MYF(MY_ALLOW_ZERO_PTR));
- VOID(end_io_cache(&param->read_cache));
- info->opt_flag&= ~(READ_CACHE_USED | WRITE_CACHE_USED);
- if (!got_error && (param->testflag & T_UNPACK))
- {
- share->state.header.options[0]&= (uchar) ~HA_OPTION_COMPRESS_RECORD;
- share->pack.header_length=0;
- }
- DBUG_RETURN(got_error);
- #endif /* THREAD */
- }
- /* Read next record and return next key */
- static int sort_key_read(MI_SORT_PARAM *sort_param, void *key)
- {
- int error;
- SORT_INFO *sort_info=sort_param->sort_info;
- MI_INFO *info=sort_info->info;
- DBUG_ENTER("sort_key_read");
- if ((error=sort_get_next_record(sort_param)))
- DBUG_RETURN(error);
- if (info->state->records == sort_info->max_records)
- {
- mi_check_print_error(sort_info->param,
- "Key %d - Found too many records; Can't continue",
- sort_param->key+1);
- DBUG_RETURN(1);
- }
- sort_param->real_key_length=
- (info->s->rec_reflength+
- _mi_make_key(info, sort_param->key, (uchar*) key,
- sort_param->record, sort_param->filepos));
- #ifdef HAVE_purify
- bzero(key+sort_param->real_key_length,
- (sort_param->key_length-sort_param->real_key_length));
- #endif
- DBUG_RETURN(sort_write_record(sort_param));
- } /* sort_key_read */
- static int sort_ft_key_read(MI_SORT_PARAM *sort_param, void *key)
- {
- int error;
- SORT_INFO *sort_info=sort_param->sort_info;
- MI_INFO *info=sort_info->info;
- FT_WORD *wptr=0;
- DBUG_ENTER("sort_ft_key_read");
- if (!sort_param->wordlist)
- {
- for (;;)
- {
- my_free((char*) wptr, MYF(MY_ALLOW_ZERO_PTR));
- if ((error=sort_get_next_record(sort_param)))
- DBUG_RETURN(error);
- if (!(wptr=_mi_ft_parserecord(info,sort_param->key,sort_param->record)))
- DBUG_RETURN(1);
- if (wptr->pos)
- break;
- error=sort_write_record(sort_param);
- }
- sort_param->wordptr=sort_param->wordlist=wptr;
- }
- else
- {
- error=0;
- wptr=(FT_WORD*)(sort_param->wordptr);
- }
- sort_param->real_key_length=(info->s->rec_reflength+
- _ft_make_key(info, sort_param->key,
- key, wptr++, sort_param->filepos));
- #ifdef HAVE_purify
- if (sort_param->key_length > sort_param->real_key_length)
- bzero(key+sort_param->real_key_length,
- (sort_param->key_length-sort_param->real_key_length));
- #endif
- if (!wptr->pos)
- {
- my_free((char*) sort_param->wordlist, MYF(0));
- sort_param->wordlist=0;
- error=sort_write_record(sort_param);
- }
- else
- sort_param->wordptr=(void*)wptr;
- DBUG_RETURN(error);
- } /* sort_ft_key_read */
- /* Read next record from file using parameters in sort_info */
- /* Return -1 if end of file, 0 if ok and > 0 if error */
- static int sort_get_next_record(MI_SORT_PARAM *sort_param)
- {
- int searching;
- uint found_record,b_type,left_length;
- my_off_t pos;
- byte *to;
- MI_BLOCK_INFO block_info;
- SORT_INFO *sort_info=sort_param->sort_info;
- MI_CHECK *param=sort_info->param;
- MI_INFO *info=sort_info->info;
- MYISAM_SHARE *share=info->s;
- char llbuff[22],llbuff2[22];
- DBUG_ENTER("sort_get_next_record");
- if (*killed_ptr(param))
- DBUG_RETURN(1);
- switch (share->data_file_type) {
- case STATIC_RECORD:
- for (;;)
- {
- if (my_b_read(&sort_param->read_cache,sort_param->record,
- share->base.pack_reclength))
- {
- if (sort_param->read_cache.error)
- param->out_flag |= O_DATA_LOST;
- param->retry_repair=1;
- param->testflag|=T_RETRY_WITHOUT_QUICK;
- DBUG_RETURN(-1);
- }
- sort_param->start_recpos=sort_param->pos;
- if (!sort_param->fix_datafile)
- {
- sort_param->filepos=sort_param->pos;
- if (sort_param->master)
- share->state.split++;
- }
- sort_param->max_pos=(sort_param->pos+=share->base.pack_reclength);
- if (*sort_param->record)
- {
- if (param->calc_checksum)
- param->glob_crc+= (info->checksum=
- mi_static_checksum(info,sort_param->record));
- DBUG_RETURN(0);
- }
- if (!sort_param->fix_datafile && sort_param->master)
- {
- info->state->del++;
- info->state->empty+=share->base.pack_reclength;
- }
- }
- case DYNAMIC_RECORD:
- LINT_INIT(to);
- pos=sort_param->pos;
- searching=(sort_param->fix_datafile && (param->testflag & T_EXTEND));
- for (;;)
- {
- found_record=block_info.second_read= 0;
- left_length=1;
- if (searching)
- {
- pos=MY_ALIGN(pos,MI_DYN_ALIGN_SIZE);
- param->testflag|=T_RETRY_WITHOUT_QUICK;
- sort_param->start_recpos=pos;
- }
- do
- {
- if (pos > sort_param->max_pos)
- sort_param->max_pos=pos;
- if (pos & (MI_DYN_ALIGN_SIZE-1))
- {
- if ((param->testflag & T_VERBOSE) || searching == 0)
- mi_check_print_info(param,"Wrong aligned block at %s",
- llstr(pos,llbuff));
- if (searching)
- goto try_next;
- }
- if (found_record && pos == param->search_after_block)
- mi_check_print_info(param,"Block: %s used by record at %s",
- llstr(param->search_after_block,llbuff),
- llstr(sort_param->start_recpos,llbuff2));
- if (_mi_read_cache(&sort_param->read_cache,
- (byte*) block_info.header,pos,
- MI_BLOCK_INFO_HEADER_LENGTH,
- (! found_record ? READING_NEXT : 0) |
- READING_HEADER))
- {
- if (found_record)
- {
- mi_check_print_info(param,
- "Can't read whole record at %s (errno: %d)",
- llstr(sort_param->start_recpos,llbuff),errno);
- goto try_next;
- }
- DBUG_RETURN(-1);
- }
- if (searching && ! sort_param->fix_datafile)
- {
- param->error_printed=1;
- param->retry_repair=1;
- param->testflag|=T_RETRY_WITHOUT_QUICK;
- DBUG_RETURN(1); /* Something wrong with data */
- }
- b_type=_mi_get_block_info(&block_info,-1,pos);
- if ((b_type & (BLOCK_ERROR | BLOCK_FATAL_ERROR)) ||
- ((b_type & BLOCK_FIRST) &&
- (block_info.rec_len < (uint) share->base.min_pack_length ||
- block_info.rec_len > (uint) share->base.max_pack_length)))
- {
- uint i;
- if (param->testflag & T_VERBOSE || searching == 0)
- mi_check_print_info(param,
- "Wrong bytesec: %3d-%3d-%3d at %10s; Skipped",
- block_info.header[0],block_info.header[1],
- block_info.header[2],llstr(pos,llbuff));
- if (found_record)
- goto try_next;
- block_info.second_read=0;
- searching=1;
- /* Search after block in read header string */
- for (i=MI_DYN_ALIGN_SIZE ;
- i < MI_BLOCK_INFO_HEADER_LENGTH ;
- i+= MI_DYN_ALIGN_SIZE)
- if (block_info.header[i] >= 1 &&
- block_info.header[i] <= MI_MAX_DYN_HEADER_BYTE)
- break;
- pos+=(ulong) i;
- sort_param->start_recpos=pos;
- continue;
- }
- if (b_type & BLOCK_DELETED)
- {
- bool error=0;
- if (block_info.block_len+ (uint) (block_info.filepos-pos) <
- share->base.min_block_length)
- {
- if (!searching)
- mi_check_print_info(param,
- "Deleted block with impossible length %u at %s",
- block_info.block_len,llstr(pos,llbuff));
- error=1;
- }
- else
- {
- if ((block_info.next_filepos != HA_OFFSET_ERROR &&
- block_info.next_filepos >=
- info->state->data_file_length) ||
- (block_info.prev_filepos != HA_OFFSET_ERROR &&
- block_info.prev_filepos >= info->state->data_file_length))
- {
- if (!searching)
- mi_check_print_info(param,
- "Delete link points outside datafile at %s",
- llstr(pos,llbuff));
- error=1;
- }
- }
- if (error)
- {
- if (found_record)
- goto try_next;
- searching=1;
- pos+= MI_DYN_ALIGN_SIZE;
- sort_param->start_recpos=pos;
- block_info.second_read=0;
- continue;
- }
- }
- else
- {
- if (block_info.block_len+ (uint) (block_info.filepos-pos) <
- share->base.min_block_length ||
- block_info.block_len > (uint) share->base.max_pack_length+
- MI_SPLIT_LENGTH)
- {
- if (!searching)
- mi_check_print_info(param,
- "Found block with impossible length %u at %s; Skipped",
- block_info.block_len+ (uint) (block_info.filepos-pos),
- llstr(pos,llbuff));
- if (found_record)
- goto try_next;
- searching=1;
- pos+= MI_DYN_ALIGN_SIZE;
- sort_param->start_recpos=pos;
- block_info.second_read=0;
- continue;
- }
- }
- if (b_type & (BLOCK_DELETED | BLOCK_SYNC_ERROR))
- {
- if (!sort_param->fix_datafile && sort_param->master &&
- (b_type & BLOCK_DELETED))
- {
- info->state->empty+=block_info.block_len;
- info->state->del++;
- share->state.split++;
- }
- if (found_record)
- goto try_next;
- if (searching)
- {
- pos+=MI_DYN_ALIGN_SIZE;
- sort_param->start_recpos=pos;
- }
- else
- pos=block_info.filepos+block_info.block_len;
- block_info.second_read=0;
- continue;
- }
- if (!sort_param->fix_datafile && sort_param->master)
- share->state.split++;
- if (! found_record++)
- {
- sort_param->find_length=left_length=block_info.rec_len;
- sort_param->start_recpos=pos;
- if (!sort_param->fix_datafile)
- sort_param->filepos=sort_param->start_recpos;
- if (sort_param->fix_datafile && (param->testflag & T_EXTEND))
- sort_param->pos=block_info.filepos+1;
- else
- sort_param->pos=block_info.filepos+block_info.block_len;
- if (share->base.blobs)
- {
- if (!(to=mi_alloc_rec_buff(info,block_info.rec_len,
- &(sort_param->rec_buff))))
- {
- if (param->max_record_length >= block_info.rec_len)
- {
- mi_check_print_error(param,"Not enough memory for blob at %s (need %lu)",
- llstr(sort_param->start_recpos,llbuff),
- (ulong) block_info.rec_len);
- DBUG_RETURN(1);
- }
- else
- {
- mi_check_print_info(param,"Not enough memory for blob at %s (need %lu); Row skipped",
- llstr(sort_param->start_recpos,llbuff),
- (ulong) block_info.rec_len);
- goto try_next;
- }
- }
- }
- else
- to= sort_param->rec_buff;
- }
- if (left_length < block_info.data_len || ! block_info.data_len)
- {
- mi_check_print_info(param,
- "Found block with too small length at %s; Skipped",
- llstr(sort_param->start_recpos,llbuff));
- goto try_next;
- }
- if (block_info.filepos + block_info.data_len >
- sort_param->read_cache.end_of_file)
- {
- mi_check_print_info(param,
- "Found block that points outside data file at %s",
- llstr(sort_param->start_recpos,llbuff));
- goto try_next;
- }
- if (_mi_read_cache(&sort_param->read_cache,to,block_info.filepos,
- block_info.data_len,
- (found_record == 1 ? READING_NEXT : 0)))
- {
- mi_check_print_info(param,
- "Read error for block at: %s (error: %d); Skipped",
- llstr(block_info.filepos,llbuff),my_errno);
- goto try_next;
- }
- left_length-=block_info.data_len;
- to+=block_info.data_len;
- pos=block_info.next_filepos;
- if (pos == HA_OFFSET_ERROR && left_length)
- {
- mi_check_print_info(param,"Wrong block with wrong total length starting at %s",
- llstr(sort_param->start_recpos,llbuff));
- goto try_next;
- }
- if (pos + MI_BLOCK_INFO_HEADER_LENGTH > sort_param->read_cache.end_of_file)
- {
- mi_check_print_info(param,"Found link that points at %s (outside data file) at %s",
- llstr(pos,llbuff2),
- llstr(sort_param->start_recpos,llbuff));
- goto try_next;
- }
- } while (left_length);
- if (_mi_rec_unpack(info,sort_param->record,sort_param->rec_buff,
- sort_param->find_length) != MY_FILE_ERROR)
- {
- if (sort_param->read_cache.error < 0)
- DBUG_RETURN(1);
- if (info->s->calc_checksum)
- info->checksum=mi_checksum(info,sort_param->record);
- if ((param->testflag & (T_EXTEND | T_REP)) || searching)
- {
- if (_mi_rec_check(info, sort_param->record, sort_param->rec_buff,
- sort_param->find_length,
- (param->testflag & T_QUICK) &&
- test(info->s->calc_checksum)))
- {
- mi_check_print_info(param,"Found wrong packed record at %s",
- llstr(sort_param->start_recpos,llbuff));
- goto try_next;
- }
- }
- if (param->calc_checksum)
- param->glob_crc+= info->checksum;
- DBUG_RETURN(0);
- }
- if (!searching)
- mi_check_print_info(param,"Key %d - Found wrong stored record at %s",
- sort_param->key+1,
- llstr(sort_param->start_recpos,llbuff));
- try_next:
- pos=(sort_param->start_recpos+=MI_DYN_ALIGN_SIZE);
- searching=1;
- }
- case COMPRESSED_RECORD:
- for (searching=0 ;; searching=1, sort_param->pos++)
- {
- if (_mi_read_cache(&sort_param->read_cache,(byte*) block_info.header,
- sort_param->pos,
- share->pack.ref_length,READING_NEXT))
- DBUG_RETURN(-1);
- if (searching && ! sort_param->fix_datafile)
- {
- param->error_printed=1;
- param->retry_repair=1;
- param->testflag|=T_RETRY_WITHOUT_QUICK;
- DBUG_RETURN(1); /* Something wrong with data */
- }
- sort_param->start_recpos=sort_param->pos;
- if (_mi_pack_get_block_info(info,&block_info,-1,sort_param->pos))
- DBUG_RETURN(-1);
- if (!block_info.rec_len &&
- sort_param->pos + MEMMAP_EXTRA_MARGIN ==
- sort_param->read_cache.end_of_file)
- DBUG_RETURN(-1);
- if (block_info.rec_len < (uint) share->min_pack_length ||
- block_info.rec_len > (uint) share->max_pack_length)
- {
- if (! searching)
- mi_check_print_info(param,"Found block with wrong recordlength: %d at %sn",
- block_info.rec_len,
- llstr(sort_param->pos,llbuff));
- continue;
- }
- if (_mi_read_cache(&sort_param->read_cache,(byte*) sort_param->rec_buff,
- block_info.filepos, block_info.rec_len,
- READING_NEXT))
- {
- if (! searching)
- mi_check_print_info(param,"Couldn't read whole record from %s",
- llstr(sort_param->pos,llbuff));
- continue;
- }
- if (_mi_pack_rec_unpack(info,sort_param->record,sort_param->rec_buff,
- block_info.rec_len))
- {
- if (! searching)
- mi_check_print_info(param,"Found wrong record at %s",
- llstr(sort_param->pos,llbuff));
- continue;
- }
- info->checksum=mi_checksum(info,sort_param->record);
- if (!sort_param->fix_datafile)
- {
- sort_param->filepos=sort_param->pos;
- if (sort_param->master)
- share->state.split++;
- }
- sort_param->max_pos=(sort_param->pos=block_info.filepos+
- block_info.rec_len);
- info->packed_length=block_info.rec_len;
- if (param->calc_checksum)
- param->glob_crc+= info->checksum;
- DBUG_RETURN(0);
- }
- }
- DBUG_RETURN(1); /* Impossible */
- }
- /* Write record to new file */
- int sort_write_record(MI_SORT_PARAM *sort_param)
- {
- int flag;
- uint length;
- ulong block_length,reclength;
- byte *from;
- byte block_buff[8];
- SORT_INFO *sort_info=sort_param->sort_info;
- MI_CHECK *param=sort_info->param;
- MI_INFO *info=sort_info->info;
- MYISAM_SHARE *share=info->s;
- DBUG_ENTER("sort_write_record");
- if (sort_param->fix_datafile)
- {
- switch (sort_info->new_data_file_type) {
- case STATIC_RECORD:
- if (my_b_write(&info->rec_cache,sort_param->record,
- share->base.pack_reclength))
- {
- mi_check_print_error(param,"%d when writing to datafile",my_errno);
- DBUG_RETURN(1);
- }
- sort_param->filepos+=share->base.pack_reclength;
- info->s->state.split++;
- /* sort_info->param->glob_crc+=mi_static_checksum(info, sort_param->record); */
- break;
- case DYNAMIC_RECORD:
- if (! info->blobs)
- from=sort_param->rec_buff;
- else
- {
- /* must be sure that local buffer is big enough */
- reclength=info->s->base.pack_reclength+
- _my_calc_total_blob_length(info,sort_param->record)+
- ALIGN_SIZE(MI_MAX_DYN_BLOCK_HEADER)+MI_SPLIT_LENGTH+
- MI_DYN_DELETE_BLOCK_HEADER;
- if (sort_info->buff_length < reclength)
- {
- if (!(sort_info->buff=my_realloc(sort_info->buff, (uint) reclength,
- MYF(MY_FREE_ON_ERROR |
- MY_ALLOW_ZERO_PTR))))
- DBUG_RETURN(1);
- sort_info->buff_length=reclength;
- }
- from=sort_info->buff+ALIGN_SIZE(MI_MAX_DYN_BLOCK_HEADER);
- }
- info->checksum=mi_checksum(info,sort_param->record);
- reclength=_mi_rec_pack(info,from,sort_param->record);
- flag=0;
- /* sort_info->param->glob_crc+=info->checksum; */
- do
- {
- block_length=reclength+ 3 + test(reclength >= (65520-3));
- if (block_length < share->base.min_block_length)
- block_length=share->base.min_block_length;
- info->update|=HA_STATE_WRITE_AT_END;
- block_length=MY_ALIGN(block_length,MI_DYN_ALIGN_SIZE);
- if (block_length > MI_MAX_BLOCK_LENGTH)
- block_length=MI_MAX_BLOCK_LENGTH;
- if (_mi_write_part_record(info,0L,block_length,
- sort_param->filepos+block_length,
- &from,&reclength,&flag))
- {
- mi_check_print_error(param,"%d when writing to datafile",my_errno);
- DBUG_RETURN(1);
- }
- sort_param->filepos+=block_length;
- info->s->state.split++;
- } while (reclength);
- /* sort_info->param->glob_crc+=info->checksum; */
- break;
- case COMPRESSED_RECORD:
- reclength=info->packed_length;
- length= save_pack_length((uint) share->pack.version, block_buff,
- reclength);
- if (info->s->base.blobs)
- length+= save_pack_length((uint) share->pack.version,
- block_buff + length, info->blob_length);
- if (my_b_write(&info->rec_cache,block_buff,length) ||
- my_b_write(&info->rec_cache,(byte*) sort_param->rec_buff,reclength))
- {
- mi_check_print_error(param,"%d when writing to datafile",my_errno);
- DBUG_RETURN(1);
- }
- /* sort_info->param->glob_crc+=info->checksum; */
- sort_param->filepos+=reclength+length;
- info->s->state.split++;
- break;
- }
- }
- if (sort_param->master)
- {
- info->state->records++;
- if ((param->testflag & T_WRITE_LOOP) &&
- (info->state->records % WRITE_COUNT) == 0)
- {
- char llbuff[22];
- printf("%sr", llstr(info->state->records,llbuff));
- VOID(fflush(stdout));
- }
- }
- DBUG_RETURN(0);
- } /* sort_write_record */
- /* Compare two keys from _create_index_by_sort */
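- /*
-   a and b point into the sort buffer's array of key pointers, which is why
-   the comparison below dereferences them once before calling ha_key_cmp().
- */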
- static int sort_key_cmp(MI_SORT_PARAM *sort_param, const void *a,
- const void *b)
- {
- uint not_used[2];
- return (ha_key_cmp(sort_param->seg, *((uchar**) a), *((uchar**) b),
- USE_WHOLE_KEY, SEARCH_SAME, not_used));
- } /* sort_key_cmp */
- static int sort_key_write(MI_SORT_PARAM *sort_param, const void *a)
- {
- uint diff_pos[2];
- char llbuff[22],llbuff2[22];
- SORT_INFO *sort_info=sort_param->sort_info;
- MI_CHECK *param= sort_info->param;
- int cmp;
- if (sort_info->key_block->inited)
- {
- cmp=ha_key_cmp(sort_param->seg,sort_info->key_block->lastkey,
- (uchar*) a, USE_WHOLE_KEY,SEARCH_FIND | SEARCH_UPDATE,
- diff_pos);
- if (param->stats_method == MI_STATS_METHOD_NULLS_NOT_EQUAL)
- ha_key_cmp(sort_param->seg,sort_info->key_block->lastkey,
- (uchar*) a, USE_WHOLE_KEY,
- SEARCH_FIND | SEARCH_NULL_ARE_NOT_EQUAL, diff_pos);
- else if (param->stats_method == MI_STATS_METHOD_IGNORE_NULLS)
- {
- diff_pos[0]= mi_collect_stats_nonulls_next(sort_param->seg,
- sort_param->notnull,
- sort_info->key_block->lastkey,
- (uchar*)a);
- }
- sort_param->unique[diff_pos[0]-1]++;
- }
- else
- {
- cmp= -1;
- if (param->stats_method == MI_STATS_METHOD_IGNORE_NULLS)
- mi_collect_stats_nonulls_first(sort_param->seg, sort_param->notnull,
- (uchar*)a);
- }
- if ((sort_param->keyinfo->flag & HA_NOSAME) && cmp == 0)
- {
- sort_info->dupp++;
- sort_info->info->lastpos=get_record_for_key(sort_info->info,
- sort_param->keyinfo,
- (uchar*) a);
- mi_check_print_warning(param,
- "Duplicate key for record at %10s against record at %10s",
- llstr(sort_info->info->lastpos,llbuff),
- llstr(get_record_for_key(sort_info->info,
- sort_param->keyinfo,
- sort_info->key_block->
- lastkey),
- llbuff2));
- param->testflag|=T_RETRY_WITHOUT_QUICK;
- if (sort_info->param->testflag & T_VERBOSE)
- _mi_print_key(stdout,sort_param->seg,(uchar*) a, USE_WHOLE_KEY);
- return (sort_delete_record(sort_param));
- }
- #ifndef DBUG_OFF
- if (cmp > 0)
- {
- mi_check_print_error(param,
- "Internal error: Keys are not in order from sort");
- return(1);
- }
- #endif
- return (sort_insert_key(sort_param,sort_info->key_block,
- (uchar*) a, HA_OFFSET_ERROR));
- } /* sort_key_write */
- int sort_ft_buf_flush(MI_SORT_PARAM *sort_param)
- {
- SORT_INFO *sort_info=sort_param->sort_info;
- SORT_KEY_BLOCKS *key_block=sort_info->key_block;
- MYISAM_SHARE *share=sort_info->info->s;
- uint val_off, val_len;
- int error;
- SORT_FT_BUF *ft_buf=sort_info->ft_buf;
- uchar *from, *to;
- val_len=share->ft2_keyinfo.keylength;
- get_key_full_length_rdonly(val_off, ft_buf->lastkey);
- to=ft_buf->lastkey+val_off;
- if (ft_buf->buf)
- {
- /* flushing first-level tree */
- error=sort_insert_key(sort_param,key_block,ft_buf->lastkey,
- HA_OFFSET_ERROR);
- for (from=to+val_len;
- !error && from < ft_buf->buf;
- from+= val_len)
- {
- memcpy(to, from, val_len);
- error=sort_insert_key(sort_param,key_block,ft_buf->lastkey,
- HA_OFFSET_ERROR);
- }
- return error;
- }
- /* flushing second-level tree keyblocks */
- error=flush_pending_blocks(sort_param);
- /* updating lastkey with second-level tree info */
- ft_intXstore(ft_buf->lastkey+val_off, -ft_buf->count);
- _mi_dpointer(sort_info->info, ft_buf->lastkey+val_off+HA_FT_WLEN,
- share->state.key_root[sort_param->key]);
- /* restoring first level tree data in sort_info/sort_param */
- sort_info->key_block=sort_info->key_block_end- sort_info->param->sort_key_blocks;
- sort_param->keyinfo=share->keyinfo+sort_param->key;
- share->state.key_root[sort_param->key]=HA_OFFSET_ERROR;
- /* writing lastkey in first-level tree */
- return error ? error :
- sort_insert_key(sort_param,sort_info->key_block,
- ft_buf->lastkey,HA_OFFSET_ERROR);
- }
- static int sort_ft_key_write(MI_SORT_PARAM *sort_param, const void *a)
- {
- uint a_len, val_off, val_len, error;
- uchar *p;
- SORT_INFO *sort_info=sort_param->sort_info;
- SORT_FT_BUF *ft_buf=sort_info->ft_buf;
- SORT_KEY_BLOCKS *key_block=sort_info->key_block;
- val_len=HA_FT_WLEN+sort_info->info->s->base.rec_reflength;
- get_key_full_length_rdonly(a_len, (uchar *)a);
- if (!ft_buf)
- {
- /*
- use two-level tree only if key_reflength fits in rec_reflength place
- and row format is NOT static - for _mi_dpointer not to garble offsets
- */
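- /*
-   E.g. a packed (dynamic-row) table whose key pointers are no wider than its
-   row pointers qualifies; presumably a static-row table is excluded because
-   its stored row pointers are record numbers that get scaled by the record
-   length, which would garble a raw key-file offset stored in their place.
- */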
- if ((sort_info->info->s->base.key_reflength <=
- sort_info->info->s->base.rec_reflength) &&
- (sort_info->info->s->options &
- (HA_OPTION_PACK_RECORD | HA_OPTION_COMPRESS_RECORD)))
- ft_buf=(SORT_FT_BUF *)my_malloc(sort_param->keyinfo->block_length +
- sizeof(SORT_FT_BUF), MYF(MY_WME));
- if (!ft_buf)
- {
- sort_param->key_write=sort_key_write;
- return sort_key_write(sort_param, a);
- }
- sort_info->ft_buf=ft_buf;
- goto word_init_ft_buf; /* no need to duplicate the code */
- }
- get_key_full_length_rdonly(val_off, ft_buf->lastkey);
- if (mi_compare_text(sort_param->seg->charset,
- ((uchar *)a)+1,a_len-1,
- ft_buf->lastkey+1,val_off-1, 0, 0)==0)
- {
- if (!ft_buf->buf) /* store in second-level tree */
- {
- ft_buf->count++;
- return sort_insert_key(sort_param,key_block,
- ((uchar *)a)+a_len, HA_OFFSET_ERROR);
- }
- /* storing the key in the buffer. */
- memcpy (ft_buf->buf, (char *)a+a_len, val_len);
- ft_buf->buf+=val_len;
- if (ft_buf->buf < ft_buf->end)
- return 0;
- /* converting to two-level tree */
- p=ft_buf->lastkey+val_off;
- while (key_block->inited)
- key_block++;
- sort_info->key_block=key_block;
- sort_param->keyinfo=& sort_info->info->s->ft2_keyinfo;
- ft_buf->count=(ft_buf->buf - p)/val_len;
- /* flushing buffer to second-level tree */
- for (error=0; !error && p < ft_buf->buf; p+= val_len)
- error=sort_insert_key(sort_param,key_block,p,HA_OFFSET_ERROR);
- ft_buf->buf=0;
- return error;
- }
- /* flushing buffer */
- if ((error=sort_ft_buf_flush(sort_param)))
- return error;
- word_init_ft_buf:
- a_len+=val_len;
- memcpy(ft_buf->lastkey, a, a_len);
- ft_buf->buf=ft_buf->lastkey+a_len;
- /*
- 32 is just a safety margin here
- (at least max(val_len, sizeof(nod_flag)) should be there).
- May be better performance could be achieved if we'd put
- (sort_info->keyinfo->block_length-32)/XXX
- instead.
- TODO: benchmark the best value for XXX.
- */
- ft_buf->end=ft_buf->lastkey+ (sort_param->keyinfo->block_length-32);
- return 0;
- } /* sort_ft_key_write */
- /* get pointer to record from a key */
- static my_off_t get_record_for_key(MI_INFO *info, MI_KEYDEF *keyinfo,
- uchar *key)
- {
- return _mi_dpos(info,0,key+_mi_keylength(keyinfo,key));
- } /* get_record_for_key */
- /* Insert a key in sort-key-blocks */
- static int sort_insert_key(MI_SORT_PARAM *sort_param,
- register SORT_KEY_BLOCKS *key_block, uchar *key,
- my_off_t prev_block)
- {
- uint a_length,t_length,nod_flag;
- my_off_t filepos,key_file_length;
- uchar *anc_buff,*lastkey;
- MI_KEY_PARAM s_temp;
- MI_INFO *info;
- MI_KEYDEF *keyinfo=sort_param->keyinfo;
- SORT_INFO *sort_info= sort_param->sort_info;
- MI_CHECK *param=sort_info->param;
- DBUG_ENTER("sort_insert_key");
- anc_buff=key_block->buff;
- info=sort_info->info;
- lastkey=key_block->lastkey;
- nod_flag= (key_block == sort_info->key_block ? 0 :
- info->s->base.key_reflength);
- if (!key_block->inited)
- {
- key_block->inited=1;
- if (key_block == sort_info->key_block_end)
- {
- mi_check_print_error(param,"To many key-block-levels; Try increasing sort_key_blocks");
- DBUG_RETURN(1);
- }
- a_length=2+nod_flag;
- key_block->end_pos=anc_buff+2;
- lastkey=0; /* No previous key in block */
- }
- else
- a_length=mi_getint(anc_buff);
- /* Save pointer to previous block */
- if (nod_flag)
- _mi_kpointer(info,key_block->end_pos,prev_block);
- t_length=(*keyinfo->pack_key)(keyinfo,nod_flag,
- (uchar*) 0,lastkey,lastkey,key,
- &s_temp);
- (*keyinfo->store_key)(keyinfo, key_block->end_pos+nod_flag,&s_temp);
- a_length+=t_length;
- mi_putint(anc_buff,a_length,nod_flag);
- key_block->end_pos+=t_length;
- if (a_length <= keyinfo->block_length)
- {
- VOID(_mi_move_key(keyinfo,key_block->lastkey,key));
- key_block->last_length=a_length-t_length;
- DBUG_RETURN(0);
- }
- /* Fill block with end-zero and write filled block */
- mi_putint(anc_buff,key_block->last_length,nod_flag);
- bzero((byte*) anc_buff+key_block->last_length,
- keyinfo->block_length- key_block->last_length);
- key_file_length=info->state->key_file_length;
- if ((filepos=_mi_new(info,keyinfo,DFLT_INIT_HITS)) == HA_OFFSET_ERROR)
- DBUG_RETURN(1);
- /* If we read the page from the key cache, we have to write it back to it */
- if (key_file_length == info->state->key_file_length)
- {
- if (_mi_write_keypage(info, keyinfo, filepos, DFLT_INIT_HITS, anc_buff))
- DBUG_RETURN(1);
- }
- else if (my_pwrite(info->s->kfile,(byte*) anc_buff,
- (uint) keyinfo->block_length,filepos, param->myf_rw))
- DBUG_RETURN(1);
- DBUG_DUMP("buff",(byte*) anc_buff,mi_getint(anc_buff));
- /* Write separator-key to block in next level */
- if (sort_insert_key(sort_param,key_block+1,key_block->lastkey,filepos))
- DBUG_RETURN(1);
- /* clear old block and write new key in it */
- key_block->inited=0;
- DBUG_RETURN(sort_insert_key(sort_param, key_block,key,prev_block));
- } /* sort_insert_key */
- /* Delete record when we found a duplicated key */
- static int sort_delete_record(MI_SORT_PARAM *sort_param)
- {
- uint i;
- int old_file,error;
- uchar *key;
- SORT_INFO *sort_info=sort_param->sort_info;
- MI_CHECK *param=sort_info->param;
- MI_INFO *info=sort_info->info;
- DBUG_ENTER("sort_delete_record");
- if ((param->testflag & (T_FORCE_UNIQUENESS|T_QUICK)) == T_QUICK)
- {
- mi_check_print_error(param,
- "Quick-recover aborted; Run recovery without switch -q or with switch -qq");
- DBUG_RETURN(1);
- }
- if (info->s->options & HA_OPTION_COMPRESS_RECORD)
- {
- mi_check_print_error(param,
- "Recover aborted; Can't run standard recovery on compressed tables with errors in data-file. Use switch 'myisamchk --safe-recover' to fix itn",stderr);;
- DBUG_RETURN(1);
- }
- old_file=info->dfile;
- info->dfile=info->rec_cache.file;
- if (sort_info->current_key)
- {
- key=info->lastkey+info->s->base.max_key_length;
- if ((error=(*info->s->read_rnd)(info,sort_param->record,info->lastpos,0)) &&
- error != HA_ERR_RECORD_DELETED)
- {
- mi_check_print_error(param,"Can't read record to be removed");
- info->dfile=old_file;
- DBUG_RETURN(1);
- }
- for (i=0 ; i < sort_info->current_key ; i++)
- {
- uint key_length=_mi_make_key(info,i,key,sort_param->record,info->lastpos);
- if (_mi_ck_delete(info,i,key,key_length))
- {
- mi_check_print_error(param,"Can't delete key %d from record to be removed",i+1);
- info->dfile=old_file;
- DBUG_RETURN(1);
- }
- }
- if (param->calc_checksum)
- param->glob_crc-=(*info->s->calc_checksum)(info, sort_param->record);
- }
- error=flush_io_cache(&info->rec_cache) || (*info->s->delete_record)(info);
- info->dfile=old_file; /* restore actual value */
- info->state->records--;
- DBUG_RETURN(error);
- } /* sort_delete_record */
- /* Fix all pending blocks and flush everything to disk */
- int flush_pending_blocks(MI_SORT_PARAM *sort_param)
- {
- uint nod_flag,length;
- my_off_t filepos,key_file_length;
- SORT_KEY_BLOCKS *key_block;
- SORT_INFO *sort_info= sort_param->sort_info;
- myf myf_rw=sort_info->param->myf_rw;
- MI_INFO *info=sort_info->info;
- MI_KEYDEF *keyinfo=sort_param->keyinfo;
- DBUG_ENTER("flush_pending_blocks");
- filepos= HA_OFFSET_ERROR; /* if empty file */
- nod_flag=0;
- for (key_block=sort_info->key_block ; key_block->inited ; key_block++)
- {
- key_block->inited=0;
- length=mi_getint(key_block->buff);
- if (nod_flag)
- _mi_kpointer(info,key_block->end_pos,filepos);
- key_file_length=info->state->key_file_length;
- bzero((byte*) key_block->buff+length, keyinfo->block_length-length);
- if ((filepos=_mi_new(info,keyinfo,DFLT_INIT_HITS)) == HA_OFFSET_ERROR)
- DBUG_RETURN(1);
- /* If we read the page from the key cache, we have to write it back */
- if (key_file_length == info->state->key_file_length)
- {
- if (_mi_write_keypage(info, keyinfo, filepos,
- DFLT_INIT_HITS, key_block->buff))
- DBUG_RETURN(1);
- }
- else if (my_pwrite(info->s->kfile,(byte*) key_block->buff,
- (uint) keyinfo->block_length,filepos, myf_rw))
- DBUG_RETURN(1);
- DBUG_DUMP("buff",(byte*) key_block->buff,length);
- nod_flag=1;
- }
- info->s->state.key_root[sort_param->key]=filepos; /* Last is root for tree */
- DBUG_RETURN(0);
- } /* flush_pending_blocks */
- /* alloc space and pointers for key_blocks */
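- /*
-   A single malloc below holds the SORT_KEY_BLOCKS array followed by one
-   (buffer_length+IO_SIZE) byte buffer per block; each block[i].buff is then
-   simply pointed into that trailing area.
- */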
- static SORT_KEY_BLOCKS *alloc_key_blocks(MI_CHECK *param, uint blocks,
- uint buffer_length)
- {
- reg1 uint i;
- SORT_KEY_BLOCKS *block;
- DBUG_ENTER("alloc_key_blocks");
- if (!(block=(SORT_KEY_BLOCKS*) my_malloc((sizeof(SORT_KEY_BLOCKS)+
- buffer_length+IO_SIZE)*blocks,
- MYF(0))))
- {
- mi_check_print_error(param,"Not enough memory for sort-key-blocks");
- return(0);
- }
- for (i=0 ; i < blocks ; i++)
- {
- block[i].inited=0;
- block[i].buff=(uchar*) (block+blocks)+(buffer_length+IO_SIZE)*i;
- }
- DBUG_RETURN(block);
- } /* alloc_key_blocks */
- /* Check if file is almost full */
- int test_if_almost_full(MI_INFO *info)
- {
- if (info->s->options & HA_OPTION_COMPRESS_RECORD)
- return 0;
- return (my_seek(info->s->kfile,0L,MY_SEEK_END,MYF(0))/10*9 >
- (my_off_t) (info->s->base.max_key_file_length) ||
- my_seek(info->dfile,0L,MY_SEEK_END,MYF(0))/10*9 >
- (my_off_t) info->s->base.max_data_file_length);
- }
- /* Recreate table with bigger, more allocated record data */
- int recreate_table(MI_CHECK *param, MI_INFO **org_info, char *filename)
- {
- int error;
- MI_INFO info;
- MYISAM_SHARE share;
- MI_KEYDEF *keyinfo,*key,*key_end;
- HA_KEYSEG *keysegs,*keyseg;
- MI_COLUMNDEF *recdef,*rec,*end;
- MI_UNIQUEDEF *uniquedef,*u_ptr,*u_end;
- MI_STATUS_INFO status_info;
- uint unpack,key_parts;
- ha_rows max_records;
- ulonglong file_length,tmp_length;
- MI_CREATE_INFO create_info;
- error=1; /* Default error */
- info= **org_info;
- status_info= (*org_info)->state[0];
- info.state= &status_info;
- share= *(*org_info)->s;
- unpack= (share.options & HA_OPTION_COMPRESS_RECORD) &&
- (param->testflag & T_UNPACK);
- if (!(keyinfo=(MI_KEYDEF*) my_alloca(sizeof(MI_KEYDEF)*share.base.keys)))
- return 0;
- memcpy((byte*) keyinfo,(byte*) share.keyinfo,
- (size_t) (sizeof(MI_KEYDEF)*share.base.keys));
- key_parts= share.base.all_key_parts;
- if (!(keysegs=(HA_KEYSEG*) my_alloca(sizeof(HA_KEYSEG)*
- (key_parts+share.base.keys))))
- {
- my_afree((gptr) keyinfo);
- return 1;
- }
- if (!(recdef=(MI_COLUMNDEF*)
- my_alloca(sizeof(MI_COLUMNDEF)*(share.base.fields+1))))
- {
- my_afree((gptr) keyinfo);
- my_afree((gptr) keysegs);
- return 1;
- }
- if (!(uniquedef=(MI_UNIQUEDEF*)
- my_alloca(sizeof(MI_UNIQUEDEF)*(share.state.header.uniques+1))))
- {
- my_afree((gptr) recdef);
- my_afree((gptr) keyinfo);
- my_afree((gptr) keysegs);
- return 1;
- }
- /* Copy the column definitions */
- memcpy((byte*) recdef,(byte*) share.rec,
- (size_t) (sizeof(MI_COLUMNDEF)*(share.base.fields+1)));
- for (rec=recdef,end=recdef+share.base.fields; rec != end ; rec++)
- {
- if (unpack && !(share.options & HA_OPTION_PACK_RECORD) &&
- rec->type != FIELD_BLOB &&
- rec->type != FIELD_VARCHAR &&
- rec->type != FIELD_CHECK)
- rec->type=(int) FIELD_NORMAL;
- }
- /* Change the new key to point at the saved key segments */
- memcpy((byte*) keysegs,(byte*) share.keyparts,
- (size_t) (sizeof(HA_KEYSEG)*(key_parts+share.base.keys+
- share.state.header.uniques)));
- keyseg=keysegs;
- for (key=keyinfo,key_end=keyinfo+share.base.keys; key != key_end ; key++)
- {
- key->seg=keyseg;
- for (; keyseg->type ; keyseg++)
- {
- if (param->language)
- keyseg->language=param->language; /* change language */
- }
- keyseg++; /* Skip end pointer */
- }
- /* Copy the unique definitions and change them to point at the new key
- segments*/
- memcpy((byte*) uniquedef,(byte*) share.uniqueinfo,
- (size_t) (sizeof(MI_UNIQUEDEF)*(share.state.header.uniques)));
- for (u_ptr=uniquedef,u_end=uniquedef+share.state.header.uniques;
- u_ptr != u_end ; u_ptr++)
- {
- u_ptr->seg=keyseg;
- keyseg+=u_ptr->keysegs+1;
- }
- if (share.options & HA_OPTION_COMPRESS_RECORD)
- share.base.records=max_records=info.state->records;
- else if (share.base.min_pack_length)
- max_records=(ha_rows) (my_seek(info.dfile,0L,MY_SEEK_END,MYF(0)) /
- (ulong) share.base.min_pack_length);
- else
- max_records=0;
- unpack= (share.options & HA_OPTION_COMPRESS_RECORD) &&
- (param->testflag & T_UNPACK);
- share.options&= ~HA_OPTION_TEMP_COMPRESS_RECORD;
- file_length=(ulonglong) my_seek(info.dfile,0L,MY_SEEK_END,MYF(0));
- tmp_length= file_length+file_length/10;
- set_if_bigger(file_length,param->max_data_file_length);
- set_if_bigger(file_length,tmp_length);
- set_if_bigger(file_length,(ulonglong) share.base.max_data_file_length);
- VOID(mi_close(*org_info));
- bzero((char*) &create_info,sizeof(create_info));
- create_info.max_rows=max(max_records,share.base.records);
- create_info.reloc_rows=share.base.reloc;
- create_info.old_options=(share.options |
- (unpack ? HA_OPTION_TEMP_COMPRESS_RECORD : 0));
- create_info.data_file_length=file_length;
- create_info.auto_increment=share.state.auto_increment;
- create_info.raid_type= share.base.raid_type;
- create_info.raid_chunks= share.base.raid_chunks;
- create_info.raid_chunksize= share.base.raid_chunksize;
- create_info.language = (param->language ? param->language :
- share.state.header.language);
- create_info.key_file_length= status_info.key_file_length;
- /* We don't have to handle symlinks here because we are using
- HA_DONT_TOUCH_DATA */
- if (mi_create(filename,
- share.base.keys - share.state.header.uniques,
- keyinfo, share.base.fields, recdef,
- share.state.header.uniques, uniquedef,
- &create_info,
- HA_DONT_TOUCH_DATA))
- {
- mi_check_print_error(param,"Got error %d when trying to recreate indexfile",my_errno);
- goto end;
- }
- *org_info=mi_open(filename,O_RDWR,
- (param->testflag & T_WAIT_FOREVER) ? HA_OPEN_WAIT_IF_LOCKED :
- (param->testflag & T_DESCRIPT) ? HA_OPEN_IGNORE_IF_LOCKED :
- HA_OPEN_ABORT_IF_LOCKED);
- if (!*org_info)
- {
- mi_check_print_error(param,"Got error %d when trying to open re-created indexfile",
- my_errno);
- goto end;
- }
- /* We are modifying */
- (*org_info)->s->options&= ~HA_OPTION_READ_ONLY_DATA;
- VOID(_mi_readinfo(*org_info,F_WRLCK,0));
- (*org_info)->state->records=info.state->records;
- if (share.state.create_time)
- (*org_info)->s->state.create_time=share.state.create_time;
- (*org_info)->s->state.unique=(*org_info)->this_unique=
- share.state.unique;
- (*org_info)->s->state.checksum=share.state.checksum;
- (*org_info)->state->del=info.state->del;
- (*org_info)->s->state.dellink=share.state.dellink;
- (*org_info)->state->empty=info.state->empty;
- (*org_info)->state->data_file_length=info.state->data_file_length;
- if (update_state_info(param,*org_info,UPDATE_TIME | UPDATE_STAT |
- UPDATE_OPEN_COUNT))
- goto end;
- error=0;
- end:
- my_afree((gptr) uniquedef);
- my_afree((gptr) keyinfo);
- my_afree((gptr) recdef);
- my_afree((gptr) keysegs);
- return error;
- }
- /* write suffix to data file if needed */
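- /*
-   For compressed tables the suffix written below is MEMMAP_EXTRA_MARGIN
-   zero bytes; this margin appears to exist so that readers (e.g. the
-   memory-mapped access path for packed records) can safely touch a few
-   bytes past the last record.
- */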
- int write_data_suffix(SORT_INFO *sort_info, my_bool fix_datafile)
- {
- MI_INFO *info=sort_info->info;
- if (info->s->options & HA_OPTION_COMPRESS_RECORD && fix_datafile)
- {
- char buff[MEMMAP_EXTRA_MARGIN];
- bzero(buff,sizeof(buff));
- if (my_b_write(&info->rec_cache,buff,sizeof(buff)))
- {
- mi_check_print_error(sort_info->param,
- "%d when writing to datafile",my_errno);
- return 1;
- }
- sort_info->param->read_cache.end_of_file+=sizeof(buff);
- }
- return 0;
- }
- /* Update state and myisamchk_time of indexfile */
- int update_state_info(MI_CHECK *param, MI_INFO *info,uint update)
- {
- MYISAM_SHARE *share=info->s;
- if (update & UPDATE_OPEN_COUNT)
- {
- share->state.open_count=0;
- share->global_changed=0;
- }
- if (update & UPDATE_STAT)
- {
- uint i, key_parts= mi_uint2korr(share->state.header.key_parts);
- share->state.rec_per_key_rows=info->state->records;
- share->state.changed&= ~STATE_NOT_ANALYZED;
- if (info->state->records)
- {
- for (i=0; i<key_parts; i++)
- {
- if (!(share->state.rec_per_key_part[i]=param->rec_per_key_part[i]))
- share->state.changed|= STATE_NOT_ANALYZED;
- }
- }
- }
- if (update & (UPDATE_STAT | UPDATE_SORT | UPDATE_TIME | UPDATE_AUTO_INC))
- {
- if (update & UPDATE_TIME)
- {
- share->state.check_time= (long) time((time_t*) 0);
- if (!share->state.create_time)
- share->state.create_time=share->state.check_time;
- }
- /*
- When tables are locked we haven't synched the share state and the
- real state for a while so we better do it here before synching
- the share state to disk. Only when table is write locked is it
- necessary to perform this synch.
- */
- if (info->lock_type == F_WRLCK)
- share->state.state= *info->state;
- if (mi_state_info_write(share->kfile,&share->state,1+2))
- goto err;
- share->changed=0;
- }
- { /* Force update of status */
- int error;
- uint r_locks=share->r_locks,w_locks=share->w_locks;
- share->r_locks= share->w_locks= share->tot_locks= 0;
- error=_mi_writeinfo(info,WRITEINFO_NO_UNLOCK);
- share->r_locks=r_locks;
- share->w_locks=w_locks;
- share->tot_locks=r_locks+w_locks;
- if (!error)
- return 0;
- }
- err:
- mi_check_print_error(param,"%d when updating keyfile",my_errno);
- return 1;
- }
- /*
- Update auto increment value for a table
- When setting the 'repair_only' flag we only want to change the
- old auto_increment value if it's wrong (smaller than some given key).
- The reason is that we shouldn't change the auto_increment value
- for a table without good reason when only doing a repair; if the
- user has inserted and deleted rows, the auto_increment value
- may be bigger than the biggest current row, and this is ok.
- If repair_only is not set, we also update the auto_increment value to
- param->auto_increment_value if that is bigger than the biggest key.
- */
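- /*
-   E.g. with repair_only set, a stored auto_increment of 57 survives even if
-   the biggest key found is only 42, while a stored value smaller than the
-   biggest key is raised from it; without repair_only the result is
-   additionally forced up to at least param->auto_increment_value.
- */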
- void update_auto_increment_key(MI_CHECK *param, MI_INFO *info,
- my_bool repair_only)
- {
- byte *record;
- if (!info->s->base.auto_key ||
- !(((ulonglong) 1 << (info->s->base.auto_key-1)
- & info->s->state.key_map)))
- {
- if (!(param->testflag & T_VERY_SILENT))
- mi_check_print_info(param,
- "Table: %s doesn't have an auto increment keyn",
- param->isam_file_name);
- return;
- }
- if (!(param->testflag & T_SILENT) &&
- !(param->testflag & T_REP))
- printf("Updating MyISAM file: %sn", param->isam_file_name);
- /*
- We have to use an allocated buffer instead of info->rec_buff as
- _mi_put_key_in_record() may use info->rec_buff
- */
- if (!(record= (byte*) my_malloc((uint) info->s->base.pack_reclength,
- MYF(0))))
- {
- mi_check_print_error(param,"Not enough memory for extra record");
- return;
- }
- mi_extra(info,HA_EXTRA_KEYREAD,0);
- if (mi_rlast(info, record, info->s->base.auto_key-1))
- {
- if (my_errno != HA_ERR_END_OF_FILE)
- {
- mi_extra(info,HA_EXTRA_NO_KEYREAD,0);
- my_free((char*) record, MYF(0));
- mi_check_print_error(param,"%d when reading last record",my_errno);
- return;
- }
- if (!repair_only)
- info->s->state.auto_increment=param->auto_increment_value;
- }
- else
- {
- ulonglong auto_increment= (repair_only ? info->s->state.auto_increment :
- param->auto_increment_value);
- info->s->state.auto_increment=0;
- update_auto_increment(info, record);
- set_if_bigger(info->s->state.auto_increment,auto_increment);
- }
- mi_extra(info,HA_EXTRA_NO_KEYREAD,0);
- my_free((char*) record, MYF(0));
- update_state_info(param, info, UPDATE_AUTO_INC);
- return;
- }
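- /*
-   Illustrative sketch (guarded out; not part of the original mi_check.c):
-   the net effect of the successful-read branch above, written as plain
-   arithmetic. 'from_last_key' stands for the value update_auto_increment()
-   derives from the last row of the auto_increment index; the helper name
-   and its parameters are hypothetical.
- */
- #ifdef MI_CHECK_USAGE_SKETCH
- static ulonglong example_new_auto_increment(ulonglong from_last_key,
-                                             ulonglong old_value,
-                                             ulonglong param_value,
-                                             my_bool repair_only)
- {
-   /* repair_only: never lower the stored value, only correct it upwards */
-   ulonglong floor_value= repair_only ? old_value : param_value;
-   return (from_last_key > floor_value) ? from_last_key : floor_value;
- }
- #endif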
- /*
- Update statistics for each part of an index
- SYNOPSIS
- update_key_parts()
- keyinfo IN Index information (only key->keysegs used)
- rec_per_key_part OUT Store statistics here
- unique IN Array of (#distinct tuples)
- notnull_tuples IN Array of (#tuples), or NULL
- records Number of records in the table
- DESCRIPTION
- This function is called to produce index statistics values from the
- unique and notnull_tuples arrays after these arrays have been filled
- by a sequential index scan (the scan is done in two places: chk_index()
- and sort_key_write()).
- This function handles all 3 index statistics collection methods.
- Unique is an array:
- unique[0]= (#different values of {keypart1}) - 1
- unique[1]= (#different values of {keypart1,keypart2} tuple)-unique[0]-1
- ...
- For MI_STATS_METHOD_IGNORE_NULLS method, notnull_tuples is an array too:
- notnull_tuples[0]= (#of {keypart1} tuples such that keypart1 is not NULL)
- notnull_tuples[1]= (#of {keypart1,keypart2} tuples such that all
- keypart{i} are not NULL)
- ...
- For all other statistics collection methods notnull_tuples==NULL.
- Output is an array:
- rec_per_key_part[k] =
- = E(#records in the table such that keypart_1=c_1 AND ... AND
- keypart_k=c_k for arbitrary constants c_1 ... c_k)
-
- = {assuming that values have a uniform distribution and the index
- contains all tuples from the domain (or that the {c_1, ..., c_k} tuple
- is chosen from index tuples)}
-
- = #tuples-in-the-index / #distinct-tuples-in-the-index.
-
- The #tuples-in-the-index and #distinct-tuples-in-the-index have different
- meaning depending on which statistics collection method is used:
-
- MI_STATS_METHOD_*    how are nulls compared?    which tuples are counted?
- NULLS_EQUAL          NULL == NULL               all tuples in table
- NULLS_NOT_EQUAL      NULL != NULL               all tuples in table
- IGNORE_NULLS         n/a                        tuples that don't have NULLs
- */
- void update_key_parts(MI_KEYDEF *keyinfo, ulong *rec_per_key_part,
- ulonglong *unique, ulonglong *notnull,
- ulonglong records)
- {
- ulonglong count=0,tmp, unique_tuples;
- ulonglong tuples= records;
- uint parts;
- for (parts=0 ; parts < keyinfo->keysegs ; parts++)
- {
- count+=unique[parts];
- unique_tuples= count + 1;
- if (notnull)
- {
- tuples= notnull[parts];
- /*
- #(unique_tuples not counting tuples with NULLs) =
- #(unique_tuples counting tuples with NULLs as different) -
- #(tuples with NULLs)
- */
- unique_tuples -= (records - notnull[parts]);
- }
-
- if (unique_tuples == 0)
- tmp= 1;
- else if (count == 0)
- tmp= tuples; /* 1 unique tuple */
- else
- tmp= (tuples + unique_tuples/2) / unique_tuples;
- /*
- for some weird keys (e.g. FULLTEXT) tmp can be <1 here.
- let's ensure it is not
- */
- set_if_bigger(tmp,1);
- if (tmp >= (ulonglong) ~(ulong) 0)
- tmp=(ulonglong) ~(ulong) 0;
- *rec_per_key_part=(ulong) tmp;
- rec_per_key_part++;
- }
- }
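- /*
-   Standalone worked example (compile separately; not part of mi_check.c):
-   re-implements only the arithmetic of update_key_parts() to show how the
-   unique[] convention documented above turns into rec_per_key_part values.
-   The numbers are made up: 1000 rows, keypart1 has 10 distinct values and
-   the (keypart1,keypart2) pair has 250 distinct values, no NULL handling.
- */
- #include <stdio.h>
- int main(void)
- {
-   unsigned long long records= 1000;
-   /* unique[0]= 10-1, unique[1]= 250-unique[0]-1, as described above */
-   unsigned long long unique[2]= { 9, 240 };
-   unsigned long long count= 0;
-   unsigned int parts;
-   for (parts= 0; parts < 2; parts++)
-   {
-     unsigned long long unique_tuples, tmp;
-     count+= unique[parts];
-     unique_tuples= count + 1;
-     if (unique_tuples == 0)
-       tmp= 1;
-     else if (count == 0)
-       tmp= records;                              /* only one distinct tuple */
-     else
-       tmp= (records + unique_tuples / 2) / unique_tuples;    /* rounded */
-     if (tmp < 1)
-       tmp= 1;
-     printf("rec_per_key_part[%u] = %llu\n", parts, tmp);
-   }
-   return 0;   /* prints 100 and 4 for this data set */
- }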
- static ha_checksum mi_byte_checksum(const byte *buf, uint length)
- {
- ha_checksum crc;
- const byte *end=buf+length;
- for (crc=0; buf != end; buf++)
- crc=((crc << 1) + *((uchar*) buf)) +
- test(crc & (((ha_checksum) 1) << (8*sizeof(ha_checksum)-1)));
- return crc;
- }
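- /*
-   Standalone illustration (compile separately; not part of mi_check.c):
-   the same shift-and-carry checksum as mi_byte_checksum() above, written
-   with an explicit 32-bit accumulator for clarity. In MyISAM ha_checksum
-   is a platform-sized integer, so the real function may differ in width.
- */
- #include <stdio.h>
- #include <stdint.h>
- #include <string.h>
- static uint32_t byte_checksum(const unsigned char *buf, size_t length)
- {
-   uint32_t crc= 0;
-   const unsigned char *end= buf + length;
-   for (; buf != end; buf++)
-   {
-     /* shift left, add the byte, and feed the dropped top bit back in */
-     uint32_t carry= (crc >> 31) & 1;
-     crc= (crc << 1) + *buf + carry;
-   }
-   return crc;
- }
- int main(void)
- {
-   const char *data= "myisam";
-   printf("checksum = %lu\n",
-          (unsigned long) byte_checksum((const unsigned char*) data,
-                                        strlen(data)));
-   return 0;
- }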
- static my_bool mi_too_big_key_for_sort(MI_KEYDEF *key, ha_rows rows)
- {
- uint key_maxlength=key->maxlength;
- if (key->flag & HA_FULLTEXT)
- {
- uint ft_max_word_len_for_sort=FT_MAX_WORD_LEN_FOR_SORT*
- key->seg->charset->mbmaxlen;
- key_maxlength+=ft_max_word_len_for_sort-HA_FT_MAXBYTELEN;
- }
- return (key->flag & HA_SPATIAL) ||
- (key->flag & (HA_BINARY_PACK_KEY | HA_VAR_LENGTH_KEY | HA_FULLTEXT) &&
- ((ulonglong) rows * key_maxlength >
- (ulonglong) myisam_max_temp_length));
- }
- /*
- Deactivate all non-unique indexes that can be recreated quickly.
- These include packed keys for which sorting would use more temporary
- space than the maximum allowed file length, or for which the unpacked
- keys would take much more space than the packed keys.
- Note that 'rows' may be zero for the case when we don't know how many
- rows we will put into the file.
- */
- void mi_disable_non_unique_index(MI_INFO *info, ha_rows rows)
- {
- MYISAM_SHARE *share=info->s;
- MI_KEYDEF *key=share->keyinfo;
- uint i;
- DBUG_ASSERT(info->state->records == 0 &&
- (!rows || rows >= MI_MIN_ROWS_TO_DISABLE_INDEXES));
- for (i=0 ; i < share->base.keys ; i++,key++)
- {
- if (!(key->flag & (HA_NOSAME | HA_SPATIAL | HA_AUTO_KEY)) &&
- ! mi_too_big_key_for_sort(key,rows) && info->s->base.auto_key != i+1)
- {
- share->state.key_map&= ~ ((ulonglong) 1 << i);
- info->update|= HA_STATE_CHANGED;
- }
- }
- }
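- /*
-   Standalone illustration (compile separately; not part of mi_check.c):
-   the key_map convention used above -- bit i set means key i is active.
-   Disabling key number 2 of a three-key table clears bit 2.
- */
- #include <stdio.h>
- int main(void)
- {
-   unsigned long long key_map= (1ULL << 3) - 1;   /* keys 0,1,2 active: 0x7 */
-   unsigned int key_to_disable= 2;
-   key_map&= ~(1ULL << key_to_disable);           /* now 0x3 */
-   printf("key_map = 0x%llx\n", key_map);
-   return 0;
- }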
- /*
- Return TRUE if we can use repair by sorting
- One can set the force argument to force the use of sorting
- even if the temporary file would be quite big!
- */
- my_bool mi_test_if_sort_rep(MI_INFO *info, ha_rows rows,
- ulonglong key_map, my_bool force)
- {
- MYISAM_SHARE *share=info->s;
- MI_KEYDEF *key=share->keyinfo;
- uint i;
- /*
- mi_repair_by_sort only works if we have at least one key. If we don't
- have any keys, we should use the normal repair.
- */
- if (!key_map)
- return FALSE; /* Can't use sort */
- for (i=0 ; i < share->base.keys ; i++,key++)
- {
- if (!force && mi_too_big_key_for_sort(key,rows))
- return FALSE;
- }
- return TRUE;
- }
- static void
- set_data_file_type(SORT_INFO *sort_info, MYISAM_SHARE *share)
- {
- if ((sort_info->new_data_file_type=share->data_file_type) ==
- COMPRESSED_RECORD && sort_info->param->testflag & T_UNPACK)
- {
- MYISAM_SHARE tmp;
- if (share->options & HA_OPTION_PACK_RECORD)
- sort_info->new_data_file_type = DYNAMIC_RECORD;
- else
- sort_info->new_data_file_type = STATIC_RECORD;
- /* Set delete_function for sort_delete_record() */
- memcpy((char*) &tmp, share, sizeof(*share));
- tmp.options&= ~HA_OPTION_COMPRESS_RECORD; /* clear only the compress flag */
- mi_setup_functions(&tmp);
- share->delete_record=tmp.delete_record;
- }
- }