Here are examples of the Java API lib.aptacluster.Buckets, taken from open source projects. By voting up you can indicate which examples are most useful and appropriate.
3 Examples
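Before the examples, here is a minimal usage sketch of Buckets, inferred purely from the calls made on this page: it behaves as a primitive-friendly map from int cluster ids to mutable int lists. The exact semantics, and the Eclipse Collections list types the examples appear to rely on, are assumptions.

// Minimal sketch, inferred from the examples below; assumes the relevant
// AptaSuite and Eclipse Collections imports are in place.
Buckets buckets = Buckets.withExpectedSize(100);   // pre-sized map: cluster id -> member ids
if (!buckets.contains(42)) {
    buckets.justPut(42, IntLists.mutable.of(7));   // start a new bucket with a single member
} else {
    buckets.get(42).add(7);                        // append to an existing bucket
}
int members = buckets.get(42).size();              // number of members in one bucket
int clusters = buckets.size();                     // number of buckets overall
buckets.justRemove(42);                            // drop a bucket by key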
Example 1 (19 votes)
Source: Export.java
License: GNU General Public License v3.0
Project: drivenbyentropy
class ClusterSizeQSComparator implements QSComparator {

    private Buckets buckets = null;

    public ClusterSizeQSComparator(Buckets buckets) {
        this.buckets = buckets;
    }

    @Override
    public int compare(int a, int b) {
        // Negate the comparison to sort in descending order of cluster size
        return -Integer.compare(buckets.get(a).size(), buckets.get(b).size());
    }
}
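As a usage sketch, the comparator can be handed to Quicksort.sort to order cluster ids largest-first. The Quicksort and Buckets calls mirror Example 2 below; the ids and member lists are hypothetical:

// Hypothetical data: cluster 0 has three members, cluster 1 has one.
Buckets buckets = Buckets.withExpectedSize(2);
buckets.justPut(0, IntLists.mutable.of(11, 12, 13));
buckets.justPut(1, IntLists.mutable.of(14));
int[] cluster_ids = { 1, 0 };
Quicksort.sort(cluster_ids, new ClusterSizeQSComparator(buckets));
// cluster_ids is now { 0, 1 }: the larger cluster comes first.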
Example 2 (1 vote)
Source: Export.java
License: GNU General Public License v3.0
Project: drivenbyentropy
/**
 * Exports clusters sorted by cluster diversity, i.e. the number of unique
 * sequences per cluster. Within each cluster, aptamers are sorted by their
 * count in the selection cycle.
 * @param sc the selection cycle whose clusters to export
 * @param cc the cluster container produced by AptaCluster
 * @param p the path to export the data to
 */
private void ClustersByDiversity(SelectionCycle sc, ClusterContainer cc, Path p) {
    // Load a writer instance depending on the configuration
    ExportWriter writer = Configuration.getParameters().getBoolean("Export.compress") ? new CompressedExportWriter() : new UncompressedExportWriter();
    writer.open(p);
    // Arrange the data for export.
    // Specifically we need a hashmap with cluster ids as keys and lists of aptamer ids as values.
    Buckets buckets = Buckets.withExpectedSize(sc.getSize());
    // We will also need to sort the keys by the size of their values.
    // Iterate over the aptamers in the selection cycle and extract cluster membership.
    for (Entry<Integer, Integer> item : sc.iterator()) {
        int cluster_id = cc.getClusterId(item.getKey());
        // Only take into account items which have a cluster membership
        if (cluster_id == -1) {
            continue;
        }
        if (!buckets.contains(cluster_id)) {
            buckets.justPut(cluster_id, IntLists.mutable.of(item.getKey()));
        } else {
            buckets.get(cluster_id).add(item.getKey());
        }
    }
    int total_number_of_clusters = buckets.size();
    // Now remove all clusters which do not contain the specified minimal number of items
    int minimal_cluster_size = Configuration.getParameters().getInt("Export.MinimalClusterSize");
    MutableIntList to_be_deleted = IntLists.mutable.empty();
    for (Entry<Integer, MutableIntList> bucket : buckets.entrySet()) {
        if (bucket.getValue().size() <= minimal_cluster_size) {
            to_be_deleted.add(bucket.getKey());
        }
    }
    MutableIntIterator it = to_be_deleted.intIterator();
    while (it.hasNext()) {
        buckets.justRemove(it.next());
    }
    // We need to sort the clusters according to their sizes
    int[] cluster_ids_by_size = new int[buckets.size()];
    int counter = 0;
    for (Entry<Integer, MutableIntList> bucket : buckets.entrySet()) {
        cluster_ids_by_size[counter] = bucket.getKey();
        counter++;
    }
    /**
     * @author Jan Hoinka
     * This comparator sorts in descending order according to the
     * size of the clusters
     */
    class ClusterSizeQSComparator implements QSComparator {

        private Buckets buckets = null;

        public ClusterSizeQSComparator(Buckets buckets) {
            this.buckets = buckets;
        }

        @Override
        public int compare(int a, int b) {
            return -Integer.compare(buckets.get(a).size(), buckets.get(b).size());
        }
    }
    Quicksort.sort(cluster_ids_by_size, new ClusterSizeQSComparator(buckets));
    // Export the clusters to file
    /**
     * @author Jan Hoinka
     * This comparator sorts aptamers in descending order according to
     * their cardinality in the selection cycle
     */
    class AptamerSizeQSComparator implements QSComparator {

        private SelectionCycle sc = null;

        public AptamerSizeQSComparator(SelectionCycle sc) {
            this.sc = sc;
        }

        @Override
        public int compare(int arg1, int arg2) {
            return -Integer.compare(sc.getAptamerCardinality(arg1), sc.getAptamerCardinality(arg2));
        }
    }
    // Iterate over all remaining clusters and write to file
    AptamerPool ap = Configuration.getExperiment().getAptamerPool();
    boolean include_primer_regions = Configuration.getParameters().getBoolean("Export.IncludePrimerRegions");
    for (int cluster_id : cluster_ids_by_size) {
        // we need to sort the aptamers by size first
        int[] aptamer_ids = buckets.get(cluster_id).toArray();
        Quicksort.sort(aptamer_ids, new AptamerSizeQSComparator(sc));
        // Finally we can write the data to file
        writer.write(">>Cluster_" + cluster_id + "\t" + buckets.get(cluster_id).size() + "\n");
        for (int aptamer_id : aptamer_ids) {
            writer.write(">Aptamer_" + aptamer_id + "\n");
            String sequence;
            if (include_primer_regions) {
                sequence = new String(ap.getAptamer(aptamer_id));
            } else {
                AptamerBounds ab = ap.getAptamerBounds(aptamer_id);
                sequence = new String(ap.getAptamer(aptamer_id), ab.startIndex, (ab.endIndex - ab.startIndex));
            }
            writer.write(String.format("%s %s\n", sequence, sc.getAptamerCardinality(aptamer_id)));
        }
        writer.write("\n");
    }
    writer.close();
}
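For reference, the file written by this method has the following shape; each cluster block opens with a tab-separated header carrying the cluster's diversity, and each member line pairs the sequence with its cardinality. All ids, counts, and sequences below are hypothetical:

>>Cluster_17	342
>Aptamer_1093
ACGTACGTACGTACGT 120
>Aptamer_2210
ACGTTCGTACGAAGGT 87

>>Cluster_5	118
>Aptamer_877
GGCTACGTACGTTCAA 54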
Example 3 (0 votes)
Source: Export.java
License: GNU General Public License v3.0
Project: drivenbyentropy
/**
 * Exports the clusters according to their size. Size is defined as
 * the sum of aptamer cardinalities over all cluster members.
 * @param sc the selection cycle whose clusters to export
 * @param cc the cluster container produced by AptaCluster
 * @param p the path to export the data to
 */
private void ClustersBySize(SelectionCycle sc, ClusterContainer cc, Path p) {
    // Load a writer instance depending on the configuration
    ExportWriter writer = Configuration.getParameters().getBoolean("Export.compress") ? new CompressedExportWriter() : new UncompressedExportWriter();
    writer.open(p);
    // Arrange the data for export.
    // Specifically we need a hashmap with cluster ids as keys and lists of aptamer ids as values.
    Buckets buckets = Buckets.withExpectedSize(sc.getSize());
    // We will also need to sort the keys by the size of their values.
    // Iterate over the aptamers in the selection cycle and extract cluster membership.
    for (Entry<Integer, Integer> item : sc.iterator()) {
        int cluster_id = cc.getClusterId(item.getKey());
        // Only take into account items which have a cluster membership
        if (cluster_id == -1) {
            continue;
        }
        if (!buckets.contains(cluster_id)) {
            buckets.justPut(cluster_id, IntLists.mutable.of(item.getKey()));
        } else {
            buckets.get(cluster_id).add(item.getKey());
        }
    }
    int total_number_of_clusters = buckets.size();
    // Now remove all clusters which do not contain the specified minimal number of items
    int minimal_cluster_size = Configuration.getParameters().getInt("Export.MinimalClusterSize");
    // For this, we need an auxiliary map recording the cluster sizes
    TIntIntHashMap cluster_sizes = new TIntIntHashMap(total_number_of_clusters);
    for (Entry<Integer, MutableIntList> bucket : buckets.entrySet()) {
        // Compute the cluster size and store it
        int sum = 0;
        MutableIntIterator it = bucket.getValue().intIterator();
        while (it.hasNext()) {
            sum += sc.getAptamerCardinality(it.next());
        }
        cluster_sizes.put(bucket.getKey().intValue(), sum);
    }
    MutableIntList to_be_deleted = IntLists.mutable.empty();
    for (Entry<Integer, MutableIntList> bucket : buckets.entrySet()) {
        if (cluster_sizes.get(bucket.getKey().intValue()) <= minimal_cluster_size) {
            to_be_deleted.add(bucket.getKey());
        }
    }
    MutableIntIterator it = to_be_deleted.intIterator();
    while (it.hasNext()) {
        buckets.justRemove(it.next());
    }
    // We need to sort the clusters according to their sizes
    int[] cluster_ids_by_size = new int[buckets.size()];
    int counter = 0;
    for (Entry<Integer, MutableIntList> bucket : buckets.entrySet()) {
        cluster_ids_by_size[counter] = bucket.getKey();
        counter++;
    }
    /**
     * @author Jan Hoinka
     * This comparator sorts in descending order according to the
     * size of the clusters
     */
    class ClusterSizeQSComparator implements QSComparator {

        private TIntIntHashMap cluster_sizes = null;

        public ClusterSizeQSComparator(TIntIntHashMap cluster_sizes) {
            this.cluster_sizes = cluster_sizes;
        }

        @Override
        public int compare(int a, int b) {
            return -Integer.compare(cluster_sizes.get(a), cluster_sizes.get(b));
        }
    }
    Quicksort.sort(cluster_ids_by_size, new ClusterSizeQSComparator(cluster_sizes));
    // Export the clusters to file
    /**
     * @author Jan Hoinka
     * This comparator sorts aptamers in descending order according to
     * their cardinality in the selection cycle
     */
    class AptamerSizeQSComparator implements QSComparator {

        private SelectionCycle sc = null;

        public AptamerSizeQSComparator(SelectionCycle sc) {
            this.sc = sc;
        }

        @Override
        public int compare(int arg1, int arg2) {
            return -Integer.compare(sc.getAptamerCardinality(arg1), sc.getAptamerCardinality(arg2));
        }
    }
    // Iterate over all remaining clusters and write to file
    AptamerPool ap = Configuration.getExperiment().getAptamerPool();
    boolean include_primer_regions = Configuration.getParameters().getBoolean("Export.IncludePrimerRegions");
    for (int cluster_id : cluster_ids_by_size) {
        // we need to sort the aptamers by size first
        int[] aptamer_ids = buckets.get(cluster_id).toArray();
        Quicksort.sort(aptamer_ids, new AptamerSizeQSComparator(sc));
        // Finally we can write the data to file
        writer.write(">>Cluster_" + cluster_id + "\t" + cluster_sizes.get(cluster_id) + "\n");
        for (int aptamer_id : aptamer_ids) {
            writer.write(">Aptamer_" + aptamer_id + "\n");
            String sequence;
            if (include_primer_regions) {
                sequence = new String(ap.getAptamer(aptamer_id));
            } else {
                AptamerBounds ab = ap.getAptamerBounds(aptamer_id);
                sequence = new String(ap.getAptamer(aptamer_id), ab.startIndex, (ab.endIndex - ab.startIndex));
            }
            writer.write(String.format("%s %s\n", sequence, sc.getAptamerCardinality(aptamer_id)));
        }
        writer.write("\n");
    }
    writer.close();
}
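The two export methods differ only in the ranking metric. A side-by-side sketch of the two metrics, using the same calls as the methods above (buckets, sc, and cluster_id as defined there):

// Diversity of a cluster: the number of unique aptamers it contains.
int diversity = buckets.get(cluster_id).size();

// Size of a cluster: the sum of aptamer cardinalities over its members.
int size = 0;
MutableIntIterator members = buckets.get(cluster_id).intIterator();
while (members.hasNext()) {
    size += sc.getAptamerCardinality(members.next());
}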