Question

I have a Spring web application. It maps the Education model to a Lucene index through Hibernate Search:

@Entity
@Table(name="educations")
@Indexed
public class Education {

    @Id
    @GeneratedValue(strategy = GenerationType.AUTO)
    @Field(termVector = TermVector.WITH_POSITION_OFFSETS)
    private long id;

    @Column(name = "name")
    @Field(termVector = TermVector.WITH_POSITION_OFFSETS)
    @Boost(value = 1.5f)
    private String name;

    @Column(name = "local_name")
    private String localName;

    @Column(name = "description", columnDefinition="TEXT")
    @Field(termVector = TermVector.WITH_POSITION_OFFSETS)
    private String description;

    // remaining fields, getters and setters omitted
}

This works great!

Now I'm trying to cluster my Lucene index with Mahout 0.9. I have a basic K-means clustering going, but I don't know how to convert my Lucene index to Mahout vectors.

Here is my basic K-means clustering class, which works with the test data points shown below:

package com.courseportal.project.utils.lsh.util;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;
import org.apache.mahout.clustering.Cluster;
import org.apache.mahout.clustering.classify.WeightedPropertyVectorWritable;
import org.apache.mahout.clustering.kmeans.KMeansDriver;
import org.apache.mahout.clustering.kmeans.Kluster;
import org.apache.mahout.common.distance.EuclideanDistanceMeasure;
import org.apache.mahout.math.RandomAccessSparseVector;
import org.apache.mahout.math.Vector;
import org.apache.mahout.math.VectorWritable;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

public class SimpleKMeansClustering {

    public static final double[][] points = {
            {1, 1}, {2, 1}, {1, 2},
            {2, 2}, {3, 3}, {8, 8},
            {9, 8}, {8, 9}, {9, 9}};

    public static void writePointsToFile(List<Vector> points,
                                         String fileName,
                                         FileSystem fs,
                                         Configuration conf) throws IOException {
        Path path = new Path(fileName);
        SequenceFile.Writer writer = new SequenceFile.Writer(fs, conf,
                path, LongWritable.class, VectorWritable.class);
        long recNum = 0;
        VectorWritable vec = new VectorWritable();
        for (Vector point : points) {
            vec.set(point);
            writer.append(new LongWritable(recNum++), vec);
        }
        writer.close();
    }

    public static List<Vector> getPoints(double[][] raw) {
        List<Vector> points = new ArrayList<Vector>();
        for (int i = 0; i < raw.length; i++) {
            double[] fr = raw[i];
            Vector vec = new RandomAccessSparseVector(fr.length);
            vec.assign(fr);
            points.add(vec);

        }
        return points;
    }

    public static void main(String[] args) throws Exception {

        int k = 2;

        List<Vector> vectors = getPoints(points);

        // Make sure the local input directory exists (mkdirs also creates parent dirs)
        File testData = new File("clustering/testdata/points");
        if (!testData.exists()) {
            testData.mkdirs();
        }

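        // Write the input points to a SequenceFile that k-means can read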
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        writePointsToFile(vectors, "clustering/testdata/points/file1", fs, conf);

        Path path = new Path("clustering/testdata/clusters/part-00000");
        SequenceFile.Writer writer = new SequenceFile.Writer(fs, conf, path, Text.class, Kluster.class);

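        // Seed the initial k clusters with the first k input points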
        for (int i = 0; i < k; i++) {
            Vector vec = vectors.get(i);
            Kluster cluster = new Kluster(vec, i, new EuclideanDistanceMeasure());
            writer.append(new Text(cluster.getIdentifier()), cluster);
        }
        writer.close();

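        // Run k-means: convergence delta 0.001, at most 10 iterations,
        // classify the points afterwards, and run sequentially (no MapReduce)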
        KMeansDriver.run(conf,
                new Path("clustering/testdata/points"),
                new Path("clustering/testdata/clusters"),
                new Path("clustering/output"),
                0.001,
                10,
                true,
                0,
                true);

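        // Read the clustered points back and print each point's cluster assignment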
        SequenceFile.Reader reader = new SequenceFile.Reader(fs,
                new Path("clustering/output/" + Cluster.CLUSTERED_POINTS_DIR + "/part-m-0"), conf);

        IntWritable key = new IntWritable();
        WeightedPropertyVectorWritable value = new WeightedPropertyVectorWritable();
        while (reader.next(key, value)) {
            System.out.println(value.toString() + " belongs to cluster " + key.toString());
        }
        reader.close();
    }
}

I read (here) that I should use LuceneIndexToSequenceFiles to do this, but I can't find that class in Mahout 0.9. Is it something I have to pull in manually?

How can I convert my index to work with my K-means clustering class?

Solution

You can use the org.apache.mahout.text package and the classes SequenceFilesFromLuceneStorageMRJob (for a distributed conversion) or SequenceFilesFromLuceneStorageDriver.
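
For example, here is a minimal sketch of invoking the driver programmatically. The paths are placeholders, and the flag names (-i, -o, -id, -f) are assumptions based on the lucene2seq command-line tool, so verify them against your Mahout build:

import org.apache.hadoop.util.ToolRunner;
import org.apache.mahout.text.SequenceFilesFromLuceneStorageDriver;

public class Lucene2SeqExample {
    public static void main(String[] args) throws Exception {
        // Hypothetical paths: -i is the Lucene index directory, -o is where
        // the SequenceFiles will be written.
        ToolRunner.run(new SequenceFilesFromLuceneStorageDriver(), new String[] {
                "-i", "lucene/indexes/educations",
                "-o", "clustering/testdata/sequencefiles",
                "-id", "id",               // stored field used as the document key
                "-f", "name,description"   // stored fields extracted as text
        });
    }
}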

You can find more info about their usage in the Mahout 0.9 tests, for example:

mahout-0.9/integration/src/test/java/org/apache/mahout/text/SequenceFilesFromLuceneStorageDriverTest.java
mahout-0.9/integration/src/test/java/org/apache/mahout/text/SequenceFilesFromLuceneStorageMRJob.java

and here https://mahout.apache.org/users/basics/creating-vectors-from-text.html

Important note: Your Lucene index must be created with the same version of Lucene used in Mahout.

OTHER TIPS

To elaborate a little on the answer in this thread, and maybe help someone in the future, this is how I finally did it:

  1. I had to put store = Store.YES on my Hibernate Search fields so that the field text is stored in the index and can be read back out. For example, on the name field of the entity above:
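
      @Column(name = "name")
      @Field(store = Store.YES, termVector = TermVector.WITH_POSITION_OFFSETS)
      @Boost(value = 1.5f)
      private String name;
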
  2. As already said, I had to make sure Hibernate Search and Mahout used the same Lucene version. Here is the relevant part of my current pom.xml:

      <!-- **********************************************************************
           **               DEPENDENCIES FOR MACHINE LEARNING                  **
           ********************************************************************** -->
    <dependency>
        <groupId>org.hibernate</groupId>
        <artifactId>hibernate-search</artifactId>
        <version>5.0.0.Alpha2</version>
    </dependency>
    <dependency>
        <groupId>org.apache.mahout</groupId>
        <artifactId>mahout-core</artifactId>
        <version>0.9</version>
    </dependency>
    <dependency>
        <groupId>org.apache.mahout</groupId>
        <artifactId>mahout-integration</artifactId>
        <version>0.9</version>
    </dependency>
    <dependency>
        <groupId>org.apache.mahout</groupId>
        <artifactId>mahout-utils</artifactId>
        <version>0.5</version>
    </dependency>
    
  3. Then I created sequence files, generated sparse vectors, and performed the clustering with the following code. It still needs to be optimized.


import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.util.ToolRunner;
import org.apache.mahout.clustering.Cluster;
import org.apache.mahout.clustering.canopy.CanopyDriver;
import org.apache.mahout.clustering.classify.WeightedPropertyVectorWritable;
import org.apache.mahout.clustering.kmeans.KMeansDriver;
import org.apache.mahout.common.distance.TanimotoDistanceMeasure;
import org.apache.mahout.text.LuceneStorageConfiguration;
import org.apache.mahout.text.SequenceFilesFromLuceneStorage;
import org.apache.mahout.vectorizer.SparseVectorsFromSequenceFiles;

import com.google.common.collect.Lists;

import java.util.Arrays;
import java.util.List;

public class SimpleKMeansClustering {

    public static void main(String[] args) throws Exception {

        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        Path indexFilesPath = new Path("lucene/indexes/educations");
        Path sequenceFilesPath = new Path("clustering/testdata/sequencefiles/");
        Path sparseVectorsPath = new Path("clustering/testdata/sparsevectors/");
        Path tfVectorsPath = new Path("clustering/testdata/sparsevectors/tf-vectors");
        Path inputClustersPath = new Path("clustering/testdata/input-clusters");
        Path finishedInputClustersPath = new Path("clustering/testdata/input-clusters/clusters-0-final");
        Path finalClustersPath = new Path("clustering/output");

        //Create sequence files from Index
        LuceneStorageConfiguration luceneStorageConf = new LuceneStorageConfiguration(conf, 
                Arrays.asList(indexFilesPath), sequenceFilesPath, "id",
                Arrays.asList("name", "description"));

        SequenceFilesFromLuceneStorage sequenceFilefromLuceneStorage = new SequenceFilesFromLuceneStorage();
        sequenceFilefromLuceneStorage.run(luceneStorageConf);

        //Generate Sparse vectors from sequence files
        generateSparseVectors(true,
                              true,
                              true,
                              5,
                              4,
                              sequenceFilesPath,
                              sparseVectorsPath);

        //Generate input clusters for K-means (instead of having k randomly initialized)
        TanimotoDistanceMeasure tanimoDistance = new TanimotoDistanceMeasure();
        CanopyDriver.run(tfVectorsPath,
                         inputClustersPath,
                         tanimoDistance,
                         3.1,
                         2.1,
                         false,
                         0.2,
                         true);

        //Generate K-Means clusters
        KMeansDriver.run(conf,
                         tfVectorsPath,
                         finishedInputClustersPath,
                         finalClustersPath,
                         0.001,
                         10,
                         true,
                         0,
                         true);

        //Read and print out the clusters in the console
        SequenceFile.Reader reader = new SequenceFile.Reader(fs,
                new Path("clustering/output/" + Cluster.CLUSTERED_POINTS_DIR + "/part-m-0"),
                conf);

        IntWritable key = new IntWritable();
        WeightedPropertyVectorWritable value = new WeightedPropertyVectorWritable();
        while (reader.next(key, value)) {
            System.out.println(value.toString() + " belongs to cluster " + key.toString());
        }
        reader.close();
    }

    public static void generateSparseVectors (boolean tfWeighting, boolean sequential, boolean named, double maxDFSigma, int numDocs, Path inputPath, Path outputPath) throws Exception {

        List<String> argList = Lists.newLinkedList();
        argList.add("-i");
        argList.add(inputPath.toString());
        argList.add("-o");
        argList.add(outputPath.toString());

        if (sequential) {
            argList.add("-seq");
        }

        if (named) {
            argList.add("-nv");
        }

        if (maxDFSigma >= 0) {
            argList.add("--maxDFSigma");
            argList.add(String.valueOf(maxDFSigma));
        }

        if (tfWeighting) {
            argList.add("--weight");
            argList.add("tf");
        }

        String[] args = argList.toArray(new String[argList.size()]);

        ToolRunner.run(new SparseVectorsFromSequenceFiles(), args);
    }

}
Licensed under: CC-BY-SA with attribution
Not affiliated with StackOverflow