A method for bulk-loading massive data into HBase

Recently I needed to migrate a full MySQL data set into HBase. Although HBase is designed for efficient reads, its compaction implementation takes a heavy toll on bulk writes: once the data reaches a certain volume, the cluster starts to misbehave.
Looking at how HBase is implemented, whatever its runtime mechanics, the final storage structure is the HFile format on the distributed file system.
Conveniently, the HBase source code ships with an HFileOutputFormat class. Reading its source, we can see:
/**
 * Copyright 2009 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.mapreduce;

import java.io.IOException;
import java.util.Map;
import java.util.TreeMap;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.io.hfile.Compression;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.mortbay.log.Log;

/**
 * Writes HFiles. Passed KeyValues must arrive in order.
 * Currently, can only write files to a single column family at a
 * time.  Multiple column families requires coordinating keys cross family.
 * Writes current time as the sequence id for the file. Sets the major compacted
 * attribute on created hfiles.
 * @see KeyValueSortReducer
 */
public class HFileOutputFormat extends FileOutputFormat<ImmutableBytesWritable, KeyValue> {
  public RecordWriter<ImmutableBytesWritable, KeyValue> getRecordWriter(TaskAttemptContext context)
  throws IOException, InterruptedException {
    // Get the path of the temporary output file 
    final Path outputPath = FileOutputFormat.getOutputPath(context);
    final Path outputdir = new FileOutputCommitter(outputPath, context).getWorkPath();
    Configuration conf = context.getConfiguration();
    final FileSystem fs = outputdir.getFileSystem(conf);
    // These configs. are from hbase-*.xml
    final long maxsize = conf.getLong("hbase.hregion.max.filesize", 268435456);
    final int blocksize = conf.getInt("hfile.min.blocksize.size", 65536);
    // Invented config.  Add to hbase-*.xml if other than default compression.
    final String compression = conf.get("hfile.compression",
      Compression.Algorithm.NONE.getName());

    return new RecordWriter<ImmutableBytesWritable, KeyValue>() {
      // Map of families to writers and how much has been output on the writer.
      private final Map<byte [], WriterLength> writers =
        new TreeMap<byte [], WriterLength>(Bytes.BYTES_COMPARATOR);
      private byte [] previousRow = HConstants.EMPTY_BYTE_ARRAY;
      private final byte [] now = Bytes.toBytes(System.currentTimeMillis());

      public void write(ImmutableBytesWritable row, KeyValue kv)
      throws IOException {
        long length = kv.getLength();
        byte [] family = kv.getFamily();
        WriterLength wl = this.writers.get(family);
        if (wl == null || ((length + wl.written) >= maxsize) &&
            Bytes.compareTo(this.previousRow, 0, this.previousRow.length,
              kv.getBuffer(), kv.getRowOffset(), kv.getRowLength()) != 0) {
          // Get a new writer.
          Path basedir = new Path(outputdir, Bytes.toString(family));
          if (wl == null) {
            wl = new WriterLength();
            this.writers.put(family, wl);
            if (this.writers.size() > 1) throw new IOException("One family only");
            // If wl == null, first file in family.  Ensure family dir exits.
            if (!fs.exists(basedir)) fs.mkdirs(basedir);
          }
          wl.writer = getNewWriter(wl.writer, basedir);
          Log.info("Writer=" + wl.writer.getPath() +
            ((wl.written == 0)? "": ", wrote=" + wl.written));
          wl.written = 0;
        }
        kv.updateLatestStamp(this.now);
        wl.writer.append(kv);
        wl.written += length;
        // Copy the row so we know when a row transition.
        this.previousRow = kv.getRow();
      }

      /* Create a new HFile.Writer. Close current if there is one.
       * @param writer
       * @param familydir
       * @return A new HFile.Writer.
       * @throws IOException
       */
      private HFile.Writer getNewWriter(final HFile.Writer writer,
          final Path familydir)
      throws IOException {
        close(writer);
        return new HFile.Writer(fs,  StoreFile.getUniqueFile(fs, familydir),
          blocksize, compression, KeyValue.KEY_COMPARATOR);
      }

      private void close(final HFile.Writer w) throws IOException {
        if (w != null) {
          StoreFile.appendMetadata(w, System.currentTimeMillis(), true);
          w.close();
        }
      }

      public void close(TaskAttemptContext c)
      throws IOException, InterruptedException {
        for (Map.Entry<byte [], WriterLength> e: this.writers.entrySet()) {
          close(e.getValue().writer);
        }
      }
    };
  }

  /*
   * Data structure to hold a Writer and amount of data written on it. 
   */
  static class WriterLength {
    long written = 0;
    HFile.Writer writer = null;
  }
}


As you can see, its workflow is simple: it initializes itself from your configuration and then writes the output directly in HFile format.
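HFileOutputFormat is meant to be used as the output format of a MapReduce job whose values are KeyValues emitted in sorted order; that is what the KeyValueSortReducer referenced in the javadoc above is for. The following is only a minimal sketch of that wiring under my own assumptions: the class name HFileImportJob, the mapper TsvToKeyValueMapper, the tab-separated input layout and the offer:action column are all made up for illustration and are not part of the HBase code shown above.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat;
import org.apache.hadoop.hbase.mapreduce.KeyValueSortReducer;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class HFileImportJob {

  /** Hypothetical mapper: turns "rowkey<TAB>value" lines into KeyValues for one column. */
  static class TsvToKeyValueMapper
      extends Mapper<LongWritable, Text, ImmutableBytesWritable, KeyValue> {
    @Override
    protected void map(LongWritable offset, Text line, Context ctx)
        throws IOException, InterruptedException {
      String[] fields = line.toString().split("\t", 2);
      if (fields.length < 2) return; // skip malformed lines
      byte[] row = Bytes.toBytes(fields[0]);
      KeyValue kv = new KeyValue(row, Bytes.toBytes("offer"), Bytes.toBytes("action"),
          System.currentTimeMillis(), Bytes.toBytes(fields[1]));
      ctx.write(new ImmutableBytesWritable(row), kv);
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new HBaseConfiguration();
    Job job = new Job(conf, "mysql-dump-to-hfile");
    job.setJarByClass(HFileImportJob.class);
    job.setInputFormatClass(TextInputFormat.class);
    job.setMapperClass(TsvToKeyValueMapper.class);
    job.setMapOutputKeyClass(ImmutableBytesWritable.class);
    job.setMapOutputValueClass(KeyValue.class);
    // KeyValueSortReducer sorts each row's KeyValues so HFileOutputFormat sees them in order.
    job.setReducerClass(KeyValueSortReducer.class);
    job.setOutputFormatClass(HFileOutputFormat.class);
    FileInputFormat.addInputPath(job, new Path(args[0]));
    FileOutputFormat.setOutputPath(job, new Path(args[1]));
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}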
For a quick test, though, I took a shortcut and wrote a lazy demo:
// Point at the cluster config and choose where the HFiles should be written.
HFileOutputFormat hf = new HFileOutputFormat();
HBaseConfiguration conf = new HBaseConfiguration();
conf.addResource(new Path("/home/performance/softs/hadoop/conf/core-site.xml"));
conf.set("mapred.output.dir", "/tmp");
conf.set("hfile.compression", Compression.Algorithm.LZO.getName());
// Fake a task attempt so we can grab a RecordWriter outside of a real MapReduce job.
TaskAttemptContext context = new TaskAttemptContext(conf, new TaskAttemptID());
RecordWriter<ImmutableBytesWritable, KeyValue> writer = hf.getRecordWriter(context);
// KeyValues for two rows in the single family "offer"; they must be appended in sorted order.
KeyValue kv = new KeyValue(Bytes.toBytes("1111111111111"), Bytes.toBytes("offer:action"),
                           System.currentTimeMillis(), Bytes.toBytes("test"));
KeyValue kv1 = new KeyValue(Bytes.toBytes("1111111111111"), Bytes.toBytes("offer:id"),
                            System.currentTimeMillis(), Bytes.toBytes("123"));
KeyValue kv3 = new KeyValue(Bytes.toBytes("1111111111112"), Bytes.toBytes("offer:action"),
                            System.currentTimeMillis(), Bytes.toBytes("test"));
KeyValue kv4 = new KeyValue(Bytes.toBytes("1111111111112"), Bytes.toBytes("offer:id"),
                            System.currentTimeMillis(), Bytes.toBytes("123"));
// The writer ignores the key argument, so null is fine here.
writer.write(null, kv);
writer.write(null, kv1);
writer.write(null, kv3);
writer.write(null, kv4);
writer.close(context);

After running this, a set of files is generated under the /tmp directory on HDFS. Note that when writing data in bulk you must make sure the keys are in sorted order.
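In the demo above the four KeyValues happen to be created in row/column order already. If your data does not come out of MySQL sorted, one simple way to guarantee ordering for a small in-memory batch is to buffer the KeyValues in a set sorted by KeyValue.COMPARATOR before handing them to the writer. This is only a sketch for batches that fit in memory; for real volumes the sort belongs in the MapReduce shuffle (see the KeyValueSortReducer sketch earlier):

// Sketch: buffer KeyValues sorted by HBase's own comparator, then write them in order.
// Only reasonable for small batches that fit in memory.
java.util.TreeSet<KeyValue> sorted = new java.util.TreeSet<KeyValue>(KeyValue.COMPARATOR);
sorted.add(kv3);   // insertion order no longer matters
sorted.add(kv);
sorted.add(kv4);
sorted.add(kv1);
for (KeyValue sortedKv : sorted) {
    writer.write(null, sortedKv);
}
writer.close(context);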
At this point the JRuby-based loadtable.rb script that HBase ships with comes into play.
Its usage is loadtable.rb, followed by the table name you want and the HDFS path:
hbase org.jruby.Main loadtable.rb offer hdfs://user/root/importoffer/_temporary/_attempt__0000_r_000000_0/
Once that finishes:
Run ./hbase shell
>list
and the offer table you just imported will be listed.
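To check the contents rather than just the table name, the usual hbase shell commands work (offer being the table just created):
>count 'offer'
>scan 'offer'
The scan should show the two rows written by the demo, 1111111111111 and 1111111111112.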
Comments
#3 Angel_Night 2012-03-26
This solved a tough problem for me.

Thanks a lot.
#2 依然仰望天空 2011-11-04
Very nice, learning from this.
#1 pengpeng 2011-02-18
Awesome ~~ learned something new.
