Here are examples of the Java API org.apache.commons.csv.CSVRecord, taken from open-source projects. By voting, you can indicate which examples are most useful and appropriate.
357 Examples
19
Source : MockController.java
with MIT License
from wolforest
with MIT License
from wolforest
// "id","ordId","date","source","installCount","registerCount","basicInfoCount","aadhaarCount","bankCardCount"
/**
 * Inserts one daily KOI metrics row into the daily_koi table, mapping
 * CSV header names onto database column names.
 *
 * NOTE(review): assumes the record contains every expected header; a
 * missing header would raise an exception from CSVRecord.get.
 */
private void insertKoi(CSVRecord record) {
    Map<String, Object> row = new HashMap<>();
    row.put("org_id", 1);
    row.put("date", parseDate(record.get("date")));
    row.put("source", record.get("source"));
    row.put("install_count", Integer.valueOf(record.get("installCount")));
    row.put("register_count", Integer.valueOf(record.get("registerCount")));
    row.put("basic_info_count", Integer.valueOf(record.get("basicInfoCount")));
    row.put("aadhaar_count", Integer.valueOf(record.get("aadhaarCount")));
    row.put("bank_card_count", Integer.valueOf(record.get("bankCardCount")));
    Sql insert = Sql.insert("daily_koi").values(row);
    // Debug output kept to preserve observable behavior.
    System.out.println(insert.getSql());
    System.out.println(row);
    jdbc.update(insert.getSql(), insert.getData());
}
19
Source : MockController.java
with MIT License
from wolforest
with MIT License
from wolforest
// "auditPreviewCount","auditRequestCount","auditPaidCount","auditPaidAmount")
/**
 * Inserts one daily_trade row recording the audit-preview count for this
 * record's date and source, tagged as a new-state loan contract trade.
 */
private void insertPreviewCount(CSVRecord record) {
    Map<String, Object> row = new HashMap<>();
    row.put("org_id", 1);
    row.put("date", parseDate(record.get("date")));
    row.put("source", record.get("source"));
    row.put("trade_type", TradeTypeEnum.LOAN_CONTRACT.getCode());
    row.put("state", (new NewState()).getCode());
    row.put("trade_count", Integer.valueOf(record.get("auditPreviewCount")));
    Sql insert = Sql.insert("daily_trade").values(row);
    // Debug output kept to preserve observable behavior.
    System.out.println(insert.getSql());
    System.out.println(row);
    jdbc.update(insert.getSql(), insert.getData());
}
19
Source : MockController.java
with MIT License
from wolforest
with MIT License
from wolforest
/**
 * Loads mock audit data from the bundled audit.csv classpath resource and
 * replays every row into the daily metric tables.
 *
 * Fixes: restores the scrape-garbled JDK calls (getClass, getClassLoader,
 * getResourceAsStream), fails fast when the resource is missing (the
 * original would NPE inside InputStreamReader), and closes the reader via
 * try-with-resources (it previously leaked).
 *
 * @return a fixed status string
 * @throws IOException if the resource is missing or reading fails
 */
@RequestMapping("/mock/audit")
public String audit() throws IOException {
    InputStream in = this.getClass().getClassLoader().getResourceAsStream("audit.csv");
    if (in == null) {
        throw new IOException("Resource not found on classpath: audit.csv");
    }
    try (Reader reader = new InputStreamReader(in, CharsetUtil.UTF_8)) {
        Iterable<CSVRecord> records = CSVFormat.DEFAULT.withFirstRecordAsHeader().withTrim().parse(reader);
        for (CSVRecord record : records) {
            insertKoi(record);
            insertPreviewCount(record);
            insertRequestCount(record);
            insertPaidCount(record);
        }
    }
    return "mock audit ...";
}
19
Source : Profiler.java
with Apache License 2.0
from whylabs
with Apache License 2.0
from whylabs
/**
 * Tracks every column of the record into the DatasetProfile bucket for
 * the given timestamp, creating the profile on first use.
 *
 * @param time    batch timestamp used as the profile key
 * @param headers column name to column index mapping
 * @param record  the CSV row to profile
 */
private void parseBatch(final Instant time, final Map<String, Integer> headers, final CSVRecord record) {
    val ds = profiles.computeIfAbsent(time, (t) -> new DatasetProfile(input.getName(), t));
    // Iterate entries directly instead of keySet()+get(): one hash lookup per column.
    for (Map.Entry<String, Integer> entry : headers.entrySet()) {
        ds.track(entry.getKey(), record.get(entry.getValue()));
    }
}
19
Source : Profiler.java
with Apache License 2.0
from whylabs
with Apache License 2.0
from whylabs
/**
 * Profiles one CSV record into the DatasetProfile bucket keyed by the
 * parsed value of the configured datetime column.
 *
 * Switch to #stressTest if we want to battle test the memory usage further.
 *
 * @param headers column name to column index mapping
 * @param record  the CSV row to profile
 */
private void parseToDateTime(final Map<String, Integer> headers, final CSVRecord record) {
    String issueDate = record.get(this.datetime.column);
    val time = this.dateTimeParser.parse(issueDate);
    val ds = profiles.computeIfAbsent(time, t -> new DatasetProfile(input.getName(), t));
    // Iterate entries directly instead of keySet()+get(): one hash lookup per column.
    for (Map.Entry<String, Integer> entry : headers.entrySet()) {
        ds.track(entry.getKey(), record.get(entry.getValue()));
    }
}
19
Source : CsvRowToOrderBookMapper.java
with MIT License
from valb3r
with MIT License
from valb3r
/**
 * Parses the column {@code key} of {@code record} as a primitive double.
 * Uses Double.parseDouble instead of Double.valueOf to avoid boxing a
 * Double that is immediately unboxed for the primitive return.
 *
 * @throws NumberFormatException if the cell is not a valid double
 */
private static double dbl(String key, CSVRecord record) {
    return Double.parseDouble(record.get(key));
}
19
Source : FileImportAction.java
with Apache License 2.0
from trixon
with Apache License 2.0
from trixon
/**
 * Returns the value mapped to {@code key} in {@code record}, or
 * {@code defaultValue} when that column is not set for this row.
 */
private String getOrDefault(CSVRecord record, String key, String defaultValue) {
    return record.isSet(key) ? record.get(key) : defaultValue;
}
19
Source : CsvTestBuilder.java
with Apache License 2.0
from tmobile
with Apache License 2.0
from tmobile
/**
 * Builds the steps for one CSV row, or returns null when the row should
 * be skipped (missing, blank, or comment step name in column 0).
 *
 * Fixes: the original compared the step name to "" with ==, which tests
 * reference identity and is never true for strings read from a parser, so
 * blank step names slipped through; it also null-checked row.get(0) twice.
 *
 * @return the built steps (each linked back to the test), or null
 */
private List<Step> parseRow(CSVRecord row, CSVRecord header) {
    String stepName = row.get(0);
    if (stepName == null || stepName.isEmpty() || stepName.startsWith("#")) {
        return null;
    }
    List<Step> steps = buildStep(test, row, header);
    if (steps != null) {
        steps.forEach(step -> step.setTest(test));
    }
    return steps;
}
19
Source : CsvDatabaseImporter.java
with GNU General Public License v3.0
from TheLastProject
with GNU General Public License v3.0
from TheLastProject
/**
 * Import a single group into the database using the given
 * session.
 *
 * @throws FormatException if the group ID column is unmapped in the
 *         record (no default is supplied to extractString)
 */
private void importGroup(SQLiteDatabase database, DBHelper helper, CSVRecord record) throws IOException, FormatException {
    // null default => extractString throws FormatException when the ID field is missing.
    String id = extractString(DBHelper.LoyaltyCardDbGroups.ID, record, null);
    helper.insertGroup(database, id);
}
19
Source : CsvDatabaseImporter.java
with GNU General Public License v3.0
from TheLastProject
with GNU General Public License v3.0
from TheLastProject
/**
 * Extract a Long from the record. The column is looked up by
 * {@code key}; if the key is unmapped, or the value is not a valid
 * long, a FormatException is thrown.
 *
 * (The original doc said "int"; the value is parsed as a long.)
 *
 * @param nullIsOk when true, an empty cell yields null instead of an error
 * @return the parsed value, or null when empty and nullIsOk
 * @throws FormatException if the key is unmapped or the value unparsable
 */
private Long extractLong(String key, CSVRecord record, boolean nullIsOk) throws FormatException {
    if (!record.isMapped(key)) {
        throw new FormatException("Field not used but expected: " + key);
    }
    String value = record.get(key);
    if (value.isEmpty() && nullIsOk) {
        return null;
    }
    try {
        // Reuse the already-fetched value instead of a second record.get(key).
        return Long.parseLong(value);
    } catch (NumberFormatException e) {
        throw new FormatException("Failed to parse field: " + key, e);
    }
}
19
Source : CsvDatabaseImporter.java
with GNU General Public License v3.0
from TheLastProject
with GNU General Public License v3.0
from TheLastProject
/**
 * Extract an Integer from the record. The column is looked up by
 * {@code key}; if the key is unmapped, or the value is not a valid
 * int, a FormatException is thrown.
 *
 * @param nullIsOk when true, an empty cell yields null instead of an error
 * @return the parsed value, or null when empty and nullIsOk
 * @throws FormatException if the key is unmapped or the value unparsable
 */
private Integer extractInt(String key, CSVRecord record, boolean nullIsOk) throws FormatException {
    if (!record.isMapped(key)) {
        throw new FormatException("Field not used but expected: " + key);
    }
    String value = record.get(key);
    if (value.isEmpty() && nullIsOk) {
        return null;
    }
    try {
        // Reuse the already-fetched value instead of a second record.get(key).
        return Integer.parseInt(value);
    } catch (NumberFormatException e) {
        throw new FormatException("Failed to parse field: " + key, e);
    }
}
19
Source : CsvDatabaseImporter.java
with GNU General Public License v3.0
from TheLastProject
with GNU General Public License v3.0
from TheLastProject
/**
 * Extract a string from the record, looked up by {@code key}. When the
 * key is unmapped, {@code defaultValue} is returned if non-null;
 * otherwise a FormatException is thrown.
 *
 * @throws FormatException if the key is unmapped and no default was given
 */
private String extractString(String key, CSVRecord record, String defaultValue) throws FormatException {
    if (record.isMapped(key)) {
        return record.get(key);
    }
    if (defaultValue == null) {
        throw new FormatException("Field not used but expected: " + key);
    }
    return defaultValue;
}
19
Source : CsvDatabaseImporter.java
with GNU General Public License v3.0
from TheLastProject
with GNU General Public License v3.0
from TheLastProject
/**
 * Parses loyalty cards from v2 CSV export data and imports each row,
 * honoring thread interruption between rows.
 *
 * Uses try-with-resources so the parser is closed on every exit path
 * (the original's explicit finally achieved the same but more verbosely).
 *
 * @throws FormatException wrapping any CSV parse failure
 * @throws InterruptedException if the importing thread was interrupted
 */
public void parseV2Cards(DBHelper db, SQLiteDatabase database, String data) throws IOException, FormatException, InterruptedException {
    // Parse cards
    try (CSVParser cardParser = new CSVParser(new StringReader(data), CSVFormat.RFC4180.withHeader())) {
        for (CSVRecord record : cardParser) {
            importLoyaltyCard(database, db, record);
            if (Thread.currentThread().isInterrupted()) {
                throw new InterruptedException();
            }
        }
    } catch (IllegalArgumentException | IllegalStateException e) {
        throw new FormatException("Issue parsing CSV data", e);
    }
}
19
Source : CsvDatabaseImporter.java
with GNU General Public License v3.0
from TheLastProject
with GNU General Public License v3.0
from TheLastProject
/**
 * Parses card-to-group mappings from v2 CSV export data and imports each
 * row, honoring thread interruption between rows.
 *
 * Uses try-with-resources so the parser is closed on every exit path.
 *
 * @throws FormatException wrapping any CSV parse failure
 * @throws InterruptedException if the importing thread was interrupted
 */
public void parseV2CardGroups(DBHelper db, SQLiteDatabase database, String data) throws IOException, FormatException, InterruptedException {
    // Parse card group mappings
    try (CSVParser cardGroupParser = new CSVParser(new StringReader(data), CSVFormat.RFC4180.withHeader())) {
        for (CSVRecord record : cardGroupParser) {
            importCardGroupMapping(database, db, record);
            if (Thread.currentThread().isInterrupted()) {
                throw new InterruptedException();
            }
        }
    } catch (IllegalArgumentException | IllegalStateException e) {
        throw new FormatException("Issue parsing CSV data", e);
    }
}
19
Source : CsvDatabaseImporter.java
with GNU General Public License v3.0
from TheLastProject
with GNU General Public License v3.0
from TheLastProject
/**
 * Parses groups from v2 CSV export data and imports each row, honoring
 * thread interruption between rows.
 *
 * Uses try-with-resources so the parser is closed on every exit path.
 *
 * @throws FormatException wrapping any CSV parse failure
 * @throws InterruptedException if the importing thread was interrupted
 */
public void parseV2Groups(DBHelper db, SQLiteDatabase database, String data) throws IOException, FormatException, InterruptedException {
    // Parse groups
    try (CSVParser groupParser = new CSVParser(new StringReader(data), CSVFormat.RFC4180.withHeader())) {
        for (CSVRecord record : groupParser) {
            importGroup(database, db, record);
            if (Thread.currentThread().isInterrupted()) {
                throw new InterruptedException();
            }
        }
    } catch (IllegalArgumentException | IllegalStateException e) {
        throw new FormatException("Issue parsing CSV data", e);
    }
}
19
Source : CsvUtils.java
with Apache License 2.0
from testingisdocumenting
with Apache License 2.0
from testingisdocumenting
/**
 * Parses CSV content into a list of rows keyed by column name. When the
 * supplied header list is empty, the header row embedded in the content
 * is used instead.
 */
public static List<Map<String, String>> parse(List<String> header, String content) {
    CSVParser parsed = readCsvRecords(header, content);
    Collection<String> effectiveHeader = header.isEmpty() ? parsed.getHeaderMap().keySet() : header;
    List<Map<String, String>> rows = new ArrayList<>();
    for (CSVRecord record : parsed) {
        rows.add(createRow(effectiveHeader, record));
    }
    return rows;
}
19
Source : CsvUtils.java
with Apache License 2.0
from testingisdocumenting
with Apache License 2.0
from testingisdocumenting
/**
 * Builds one row map by pairing header names positionally with record
 * values, preserving header order. Any failure (e.g. fewer values than
 * headers) is wrapped in a RuntimeException naming the offending record.
 */
private static Map<String, String> createRow(Collection<String> header, CSVRecord record) {
    Map<String, String> row = new LinkedHashMap<>();
    try {
        int column = 0;
        for (String name : header) {
            row.put(name, record.get(column++));
        }
    } catch (Exception e) {
        throw new RuntimeException("Can't parse " + record, e);
    }
    return row;
}
19
Source : HAProxyStatsExtractor.java
with Apache License 2.0
from sematext
with Apache License 2.0
from sematext
/**
 * Fetches the HAProxy stats page (CSV) and maps each row onto an
 * HAProxyStats object. Stat objects are cached per (proxyName,
 * serviceName) key in the {@code stats} field so that counter holders
 * (e.g. totalSessionsHolder) persist between collections.
 *
 * @return one HAProxyStats per CSV row
 * @throws StatsCollectionFailedException when the fetched CSV cannot be parsed
 */
public List<HAProxyStats> getStats() throws StatsCollectionFailedException {
    List<HAProxyStats> results = Lists.newArrayList();
    String content = source.fetchData();
    CSVParser parser = null;
    try {
        parser = CSVParser.parse(content, CSVFormat.DEFAULT.withHeader());
    } catch (IOException e) {
        throw new StatsCollectionFailedException("Data parse failed", e);
    }
    try {
        for (final CSVRecord record : parser) {
            // HAProxy's stats CSV literally names its first column "# pxname".
            key.proxyName = record.get("# pxname");
            key.serviceName = record.get("svname");
            HAProxyStats stat = stats.get(key);
            if (stat == null) {
                // First sighting of this proxy/service pair: cache a new entry
                // under a defensive copy of the (mutable) lookup key.
                stat = new HAProxyStats();
                HAProxyStatsKey statKey = new HAProxyStatsKey(key);
                stats.put(statKey, stat);
                stat.proxyName = record.get("# pxname");
                stat.serviceName = record.get("svname");
            }
            // Gauges via parseLong; counters via parseCounter, which folds the
            // raw value into its per-stat holder.
            stat.curQueuedRequests = parseLong(record.get("qcur"));
            stat.maxQueuedRequests = parseLong(record.get("qmax"));
            stat.curSessions = parseLong(record.get("scur"));
            stat.maxSessions = parseLong(record.get("smax"));
            stat.sessionsLimit = parseLong(record.get("slim"));
            stat.totalSessions = parseCounter(record.get("stot"), stat.totalSessionsHolder);
            stat.bytesIn = parseCounter(record.get("bin"), stat.bytesInHolder);
            stat.bytesOut = parseCounter(record.get("bout"), stat.bytesOutHolder);
            stat.deniedRequests = parseCounter(record.get("dreq"), stat.deniedRequestsHolder);
            stat.deniedResponses = parseCounter(record.get("dresp"), stat.deniedResponsesHolder);
            stat.reqErrors = parseCounter(record.get("ereq"), stat.reqErrorsHolder);
            stat.conErrors = parseCounter(record.get("econ"), stat.conErrorsHolder);
            stat.resErrors = parseCounter(record.get("eresp"), stat.resErrorsHolder);
            stat.retries = parseCounter(record.get("wretr"), stat.retriesHolder);
            stat.redispatches = parseCounter(record.get("wredis"), stat.redispatchesHolder);
            // Map the textual status onto a numeric gauge: UP/OPEN=1, DOWN=-1, other=0.
            String status = record.get("status");
            if ("UP".equals(status) || "OPEN".equals(status)) {
                stat.status = 1d;
            } else if ("DOWN".equals(status)) {
                stat.status = -1d;
            } else {
                stat.status = 0d;
            }
            stat.serverWeight = parseLong(record.get("weight"));
            // "act"/"bck" flags: report the server's weight when active/backup, else 0.
            String active = record.get("act");
            if ("Y".equalsIgnoreCase(active)) {
                stat.active = stat.serverWeight;
            } else {
                stat.active = 0l;
            }
            String backup = record.get("bck");
            if ("Y".equalsIgnoreCase(backup)) {
                stat.backup = stat.serverWeight;
            } else {
                stat.backup = 0l;
            }
            stat.numFailedChecks = parseCounter(record.get("chkfail"), stat.numFailedChecksHolder);
            stat.numUpDownTransitions = parseCounter(record.get("chkdown"), stat.numUpDownTransitionsHolder);
            stat.lastStatusChange = parseLong(record.get("lastchg"));
            stat.downtime = parseGauge(record.get("downtime"), stat.downtimeHolder);
            stat.queueLimit = parseLong(record.get("qlimit"));
            stat.numSelected = parseCounter(record.get("lbtot"), stat.numSelectedHolder);
            // Numeric "type" column decodes as: 0=frontend, 1=backend, 2=server, 3=socket.
            String type = record.get("type");
            if ("0".equalsIgnoreCase(type)) {
                stat.type = "FRONTEND";
            } else if ("1".equalsIgnoreCase(type)) {
                stat.type = "BACKEND";
            } else if ("2".equalsIgnoreCase(type)) {
                stat.type = "SERVER";
            } else if ("3".equalsIgnoreCase(type)) {
                stat.type = "SOCKET";
            } else {
                stat.type = "UNKNOWN";
            }
            stat.rate = parseLong(record.get("rate"));
            stat.rateLimit = parseLong(record.get("rate_lim"));
            stat.rateMax = parseLong(record.get("rate_max"));
            results.add(stat);
        }
    } finally {
        try {
            parser.close();
        } catch (IOException e) {
            // NOTE(review): close failure is deliberately swallowed; the stats
            // were already collected at this point.
        }
    }
    return results;
}
19
Source : RecordToXMLSerializer.java
with Mozilla Public License 2.0
from secdec
with Mozilla Public License 2.0
from secdec
/**
 * Serializes every record from the parser into the XML document, indexing
 * records from zero in iteration order, and returns the finished string.
 */
public static String getFromReader(CSVParser parser) {
    StringBuilder builder = getStart();
    int index = 0;
    for (CSVRecord record : parser) {
        addRecord(builder, index, record.toMap());
        index++;
    }
    return writeEnd(builder);
}
19
Source : WordSearchComparison.java
with GNU Lesser General Public License v3.0
from searchhub
with GNU Lesser General Public License v3.0
from searchhub
/**
 * Registers every comma-separated variant from column 2 of the record as
 * a candidate mapping to the target word in column 0.
 */
private static void appendToList(Map<String, String> tpCandidates, CSVRecord csvRecord) {
    final String target = csvRecord.get(0);
    for (String variant : csvRecord.get(2).split(",")) {
        tpCandidates.put(variant, target);
    }
}
19
Source : CsvParser.java
with MIT License
from reposense
with MIT License
from reposense
/**
 * Returns true if the {@code record} at {@code colNum} is prefixed with the override keyword.
 *
 * @param record the CSV row to inspect
 * @param colNum zero-based column index
 * @return true when the trimmed cell value starts with OVERRIDE_KEYWORD
 */
protected boolean isElementOverridingStandaloneConfig(final CSVRecord record, int colNum) {
    // get() trims the raw cell value before the prefix check.
    return get(record, colNum).startsWith(OVERRIDE_KEYWORD);
}
19
Source : CsvParser.java
with MIT License
from reposense
with MIT License
from reposense
/**
 * Returns the value of {@code record} at {@code colNum} if present, or
 * returns {@code defaultValue} otherwise.
 */
protected String getOrDefault(final CSVRecord record, int colNum, String defaultValue) {
    // Fetch (and trim) once; the original called get() twice per invocation.
    String value = get(record, colNum);
    return value.isEmpty() ? defaultValue : value;
}
19
Source : CsvParser.java
with MIT License
from reposense
with MIT License
from reposense
/**
 * Returns the value of {@code record} at {@code colNum} as a {@code List},
 * delimited by {@code COLUMN_VALUES_SEPARATOR} with each element trimmed,
 * or returns an empty {@code List} when the cell is empty.
 */
protected List<String> getAsList(final CSVRecord record, int colNum) {
    // Fetch (and trim) once; the original called get() twice per invocation.
    String value = get(record, colNum);
    if (value.isEmpty()) {
        return Collections.emptyList();
    }
    return Arrays.stream(value.split(COLUMN_VALUES_SEPARATOR)).map(String::trim).collect(Collectors.toList());
}
19
Source : CsvParser.java
with MIT License
from reposense
with MIT License
from reposense
/**
 * Returns the value of {@code record} at {@code colNum}.
 *
 * @return the cell value with leading/trailing whitespace stripped
 */
protected String get(final CSVRecord record, int colNum) {
    return record.get(colNum).trim();
}
19
Source : CsvParser.java
with MIT License
from reposense
with MIT License
from reposense
/**
 * Returns true if {@code record} does not contain the same number of columns as the header or contains missing
 * values at the mandatory columns in CSV format.
 */
private boolean isLineMalformed(CSVRecord record) {
    if (!record.isConsistent()) {
        warnMalformedLine(record);
        return true;
    }
    for (int position : mandatoryPositions()) {
        if (record.get(position).isEmpty()) {
            warnMalformedLine(record);
            return true;
        }
    }
    return false;
}

/**
 * Logs the standard malformed-line warning for {@code record}; extracted
 * to remove the duplicated formatting call in isLineMalformed.
 */
private void warnMalformedLine(CSVRecord record) {
    logger.warning(String.format(MESSAGE_MALFORMED_LINE_FORMAT, getLineNumber(record), csvFilePath.getFileName(), getRowContentAsRawString(record)));
}
19
Source : CsvParser.java
with MIT License
from reposense
with MIT License
from reposense
/**
 * Returns the 1-based file line number of {@code record}, offsetting the
 * parser's record number by the lines preceding the first record
 * (e.g. header/comment lines skipped by the parser).
 */
private long getLineNumber(final CSVRecord record) {
    return record.getRecordNumber() + numOfLinesBeforeFirstRecord;
}
19
Source : CsvParser.java
with MIT License
from reposense
with MIT License
from reposense
/**
 * Returns the contents of {@code record} as a raw string: all cell values
 * joined with commas, or {@code MESSAGE_EMPTY_LINE} when the row is blank.
 *
 * Fix: restores the scrape-garbled local variable name
 * ("contentreplacedtring" -> contentAsString).
 */
private String getRowContentAsRawString(final CSVRecord record) {
    StringJoiner inputRowString = new StringJoiner(",");
    for (String value : record) {
        inputRowString.add(value);
    }
    String contentAsString = inputRowString.toString();
    if (contentAsString.trim().isEmpty()) {
        contentAsString = MESSAGE_EMPTY_LINE;
    }
    return contentAsString;
}
19
Source : Endpoint.java
with Apache License 2.0
from Remper
with Apache License 2.0
from Remper
/**
 * Queries the knowledge base for all relation/property pairs of the given
 * resource and groups property values by relation name. On query failure
 * the error is logged and a resource with an empty property map is
 * returned (best-effort semantics, preserved from the original).
 *
 * @param resourceId the KB resource identifier substituted into the query
 * @return the resource with whatever properties could be fetched
 */
public DBpediaResource getResourceById(String resourceId) {
    Map<String, List<String>> properties = new HashMap<>();
    CloseableHttpResponse response = null;
    try {
        response = executeQuery(getQuery().replace(":resourceId", resourceId));
        for (CSVRecord record : process(response)) {
            // computeIfAbsent replaces the getOrDefault+put round trip and
            // reads the "relation" column once instead of three times.
            properties.computeIfAbsent(record.get("relation"), k -> new LinkedList<>()).add(record.get("property"));
        }
    } catch (URISyntaxException | IOException e) {
        logger.error(String.format("Error while querying KB with resource ID %s", resourceId), e);
    } finally {
        if (response != null) {
            try {
                response.close();
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
    }
    return new DBpediaResource(resourceId, properties);
}
19
Source : CSVToDocUnitConvertService.java
with GNU Affero General Public License v3.0
from progilone
with GNU Affero General Public License v3.0
from progilone
/**
 * Builds a map of one CSV line, keying each non-empty cell by
 * "col&lt;i&gt;_&lt;header&gt;" so repeated header names stay distinct.
 * Cells beyond the known headers are ignored.
 */
private Map<String, String> getLineFromMappedHeader(final List<String> entetes, final CSVRecord record) {
    final Map<String, String> line = new HashMap<>();
    final int columns = record.size();
    for (int i = 0; i < columns; i++) {
        if (i >= entetes.size()) {
            continue;
        }
        final String value = record.get(i);
        if (StringUtils.isNotEmpty(value)) {
            // "colx_" prefix keeps duplicated header names from colliding.
            line.putIfAbsent("col" + i + "_" + entetes.get(i), value);
        }
    }
    return line;
}
19
Source : CSVToDocUnitConvertService.java
with GNU Affero General Public License v3.0
from progilone
with GNU Affero General Public License v3.0
from progilone
/**
 * Builds a map of one CSV line, keying each non-empty cell by
 * "col&lt;i&gt;_&lt;header&gt;" using the header record's values.
 * Cells beyond the header width are ignored.
 */
private Map<String, String> getLine(final CSVRecord header, final CSVRecord record) {
    final Map<String, String> line = new HashMap<>();
    final int columns = record.size();
    for (int i = 0; i < columns; i++) {
        if (i >= header.size()) {
            continue;
        }
        final String value = record.get(i);
        if (StringUtils.isNotEmpty(value)) {
            // "colx_" prefix keeps duplicated header names from colliding.
            line.putIfAbsent("col" + i + "_" + header.get(i), value);
        }
    }
    return line;
}
19
Source : SystemScanTaskV7.java
with Apache License 2.0
from pegasystems
with Apache License 2.0
from pegasystems
/**
 * Copies every cell value of the record into a LinkedList, preserving
 * column order. Uses the enhanced for loop over CSVRecord's Iterable
 * instead of the manual Iterator the original hand-rolled.
 */
private LinkedList<String> getRecordDataList(CSVRecord csvRecord) {
    LinkedList<String> recordDataList = new LinkedList<>();
    for (String col : csvRecord) {
        recordDataList.add(col);
    }
    return recordDataList;
}
19
Source : CsvCachedRowSetImpl.java
with Apache License 2.0
from osalvador
with Apache License 2.0
from osalvador
/**
 * Reads up to getFetchSize() rows from the CSV source into the rowset,
 * converting each cell to the JDBC update call matching the declared
 * column type, then rewinds the cursor to before the first row.
 *
 * NOTE(review): this.records.iterator() is called twice per loop
 * iteration (hasNext and next). This only advances correctly if every
 * iterator shares the underlying parser state — true for a live
 * CSVParser, but fragile; confirm the declared type of {@code records}
 * before hoisting the iterator.
 *
 * NOTE(review): "lineNumer" looks like a typo for "lineNumber"; the field
 * is declared elsewhere, so it is left untouched here.
 *
 * @throws SQLException propagated from the rowset update methods; parse
 *         failures are logged with the offending line number and rethrown
 */
private void readData() throws SQLException {
    // Close current cursor and reaopen.
    int currentFetchSize = getFetchSize();
    setFetchSize(0);
    close();
    setFetchSize(currentFetchSize);
    moveToInsertRow();
    CSVRecord record;
    for (int i = 1; i <= getFetchSize(); i++) {
        lineNumer++;
        try {
            if (this.records.iterator().hasNext()) {
                record = this.records.iterator().next();
                // Convert each cell according to its declared SQL type.
                for (int j = 0; j <= this.columnsTypes.length - 1; j++) {
                    switch(this.columnsTypes[j]) {
                        case "VARCHAR":
                        case "CHAR":
                        case "LONGVARCHAR":
                            updateString(j + 1, record.get(j));
                            break;
                        case "INTEGER":
                            updateInt(j + 1, Integer.parseInt(record.get(j)));
                            break;
                        case "TINYINT":
                            updateByte(j + 1, Byte.parseByte(record.get(j)));
                            break;
                        case "SMALLINT":
                            updateShort(j + 1, Short.parseShort(record.get(j)));
                            break;
                        case "BIGINT":
                            updateLong(j + 1, Long.parseLong(record.get(j)));
                            break;
                        case "NUMERIC":
                        case "DECIMAL":
                            /*
                             * "0" [0,0]
                             * "0.00" [0,2]
                             * "123" [123,0]
                             * "-123" [-123,0]
                             * "1.23E3" [123,-1]
                             * "1.23E+3" [123,-1]
                             * "12.3E+7" [123,-6]
                             * "12.0" [120,1]
                             * "12.3" [123,1]
                             * "0.00123" [123,5]
                             * "-1.23E-12" [-123,14]
                             * "1234.5E-4" [12345,5]
                             * "0E+7" [0,-7]
                             * "-0" [0,0]
                             */
                            updateBigDecimal(j + 1, new BigDecimal(record.get(j)));
                            break;
                        case "DOUBLE":
                            updateDouble(j + 1, Double.parseDouble(record.get(j)));
                            break;
                        case "FLOAT":
                            updateFloat(j + 1, Float.parseFloat(record.get(j)));
                            break;
                        case "DATE":
                            // yyyy-[m]m-[d]d
                            updateDate(j + 1, Date.valueOf(record.get(j)));
                            break;
                        case "TIMESTAMP":
                            // yyyy-[m]m-[d]d hh:mm:ss[.f...]
                            updateTimestamp(j + 1, Timestamp.valueOf(record.get(j)));
                            break;
                        case "TIME":
                            // hh:mm:ss
                            updateTime(j + 1, Time.valueOf(record.get(j)));
                            break;
                        case "BOOLEAN":
                            updateBoolean(j + 1, convertToBoolean(record.get(j)));
                            break;
                        default:
                            // Unknown types fall back to the raw string value.
                            updateString(j + 1, record.get(j));
                            break;
                    }
                }
                insertRow();
            }
        } catch (Exception e) {
            LOG.error("An error has occurred reading line number " + lineNumer + " of the CSV file", e);
            throw e;
        }
    }
    moveToCurrentRow();
    beforeFirst();
}
19
Source : IssueRecords.java
with MIT License
from onozaty
with MIT License
from onozaty
/**
 * Converts one CSV record into an IssueRecord: resolves the primary key
 * (either an issue id column or a custom field flagged as primary key)
 * and collects every other configured field as an update target.
 *
 * NOTE(review): if no configured field yields a primary key, the
 * IssueRecord is built with a null key — confirm callers handle that.
 */
private IssueRecord toIssueRecord(CSVRecord csvRecord) {
    PrimaryKey primaryKey = null;
    IssueTargetFieldsBuilder targetFieldsBuilder = new IssueTargetFieldsBuilder();
    for (FieldSetting fieldSetting : config.getFields()) {
        String value = convertValue(csvRecord.get(fieldSetting.getHeaderName()), fieldSetting);
        FieldType fieldType = fieldSetting.getType();
        switch(fieldType) {
            case ISSUE_ID:
                primaryKey = new IssueId(Integer.parseInt(value));
                break;
            case CUSTOM_FIELD:
                CustomField customField = new CustomField(fieldSetting.getCustomFieldId(), value);
                if (fieldSetting.isPrimaryKey()) {
                    primaryKey = customField;
                } else {
                    targetFieldsBuilder.customField(customField);
                }
                break;
            default:
                // All other field types are used as fields to update.
                targetFieldsBuilder.field(fieldType, value);
                break;
        }
    }
    return new IssueRecord(primaryKey, targetFieldsBuilder.build());
}
19
Source : SamplingCsvLoaderWorker.java
with Apache License 2.0
from nhl
with Apache License 2.0
from nhl
/**
 * Appends the record's values to each of the first {@code width} column
 * accumulators.
 *
 * NOTE(review): "columnAcreplacedulators" looks scrape-garbled (presumably
 * "columnAccumulators"); the field is declared elsewhere in this file, so
 * the reference is left untouched.
 */
protected void addRow(int width, CSVRecord record) {
    for (int i = 0; i < width; i++) {
        columnAcreplacedulators[i].add(record);
    }
}
19
Source : SamplingCsvLoaderWorker.java
with Apache License 2.0
from nhl
with Apache License 2.0
from nhl
/**
 * Overwrites sample slot {@code pos} in each of the first {@code width}
 * column accumulators with the given record's values.
 *
 * NOTE(review): "columnAcreplacedulators" looks scrape-garbled (presumably
 * "columnAccumulators"); the field is declared elsewhere, so the
 * reference is left untouched.
 */
protected void replaceRow(int pos, int width, CSVRecord record) {
    for (int i = 0; i < width; i++) {
        columnAcreplacedulators[i].set(pos, record);
    }
}
19
Source : SamplingCsvLoaderWorker.java
with Apache License 2.0
from nhl
with Apache License 2.0
from nhl
/**
 * Feeds every CSV row to the sampler, numbering rows from zero in
 * iteration order.
 */
protected void consumeCSV(Iterator<CSVRecord> it) {
    int width = columnIndex.size();
    for (int rowNumber = 0; it.hasNext(); rowNumber++) {
        sampleRow(rowNumber, width, it.next());
    }
}
19
Source : SamplingCsvLoaderWorker.java
with Apache License 2.0
from nhl
with Apache License 2.0
from nhl
/**
 * Reservoir sampling per https://en.wikipedia.org/wiki/Reservoir_sampling:
 * the first rowSampleSize rows fill the reservoir; each later row replaces
 * a random reservoir slot with probability rowSampleSize / (rowNumber + 1).
 */
protected void sampleRow(int rowNumber, int width, CSVRecord row) {
    if (rowNumber < rowSampleSize) {
        // Still filling the reservoir.
        addRow(width, row);
        sampledRows.addInt(rowNumber);
        return;
    }
    // Replace a previously sampled row with decaying probability.
    int pos = rowsSampleRandom.nextInt(rowNumber + 1);
    if (pos < rowSampleSize) {
        replaceRow(pos, width, row);
        sampledRows.setInt(pos, rowNumber);
    }
}
19
Source : FilteringSamplingCsvLoaderWorker.java
with Apache License 2.0
from nhl
with Apache License 2.0
from nhl
/**
 * Consumes CSV rows, evaluating each against the row filter in a typed
 * buffer; only rows passing the filter are handed to the sampler.
 *
 * NOTE(review): the counter "i" advances only for rows that pass the
 * filter, so sampling positions are relative to the filtered stream.
 */
@Override
protected void consumeCSV(Iterator<CSVRecord> it) {
    int width = columnIndex.size();
    int i = 0;
    while (it.hasNext()) {
        CSVRecord row = it.next();
        // perform filtering in a separate buffer before sampling....
        // 1. fill the buffer for condition evaluation. All values will be converted to the right data types
        int csvWidth = csvRow.length;
        for (int j = 0; j < csvWidth; j++) {
            csvRow[j].set(row);
        }
        // 2. eval filters
        if (csvRowFilter.test(csvRow)) {
            // 3. sample row since the condition is satisfied
            sampleBufferedRow(i++, width);
        }
    }
}
19
Source : FilteringCsvLoaderWorker.java
with Apache License 2.0
from nhl
with Apache License 2.0
from nhl
/**
 * Buffers the row with typed conversion, evaluates the row filter, and
 * accumulates the buffered values only when the filter passes.
 *
 * NOTE(review): "columnAcreplacedulators" looks scrape-garbled (presumably
 * "columnAccumulators"); the field is declared elsewhere, so the
 * reference is left untouched.
 */
@Override
protected void addRow(int width, CSVRecord row) {
    // 1. fill the buffer for condition evaluation. All values will be converted to the right data types
    int csvWidth = csvRow.length;
    for (int i = 0; i < csvWidth; i++) {
        csvRow[i].set(row);
    }
    // 2. eval filters
    if (csvRowFilter.test(csvRow)) {
        // 3. add row since the condition is satisfied
        for (int i = 0; i < width; i++) {
            columnAcreplacedulators[i].add(csvRow);
        }
    }
}
19
Source : CsvLoader.java
with Apache License 2.0
from nhl
with Apache License 2.0
from nhl
/**
 * Converts the CSV header record into an Index of column labels,
 * preserving column order.
 */
private Index loadCsvHeader(CSVRecord header) {
    String[] labels = new String[header.size()];
    for (int i = 0; i < labels.length; i++) {
        labels[i] = header.get(i);
    }
    return Index.forLabels(labels);
}
19
Source : BaseCsvLoaderWorker.java
with Apache License 2.0
from nhl
with Apache License 2.0
from nhl
/**
 * Appends the row's values to each of the first {@code width} column
 * accumulators.
 *
 * NOTE(review): "columnAcreplacedulators" looks scrape-garbled (presumably
 * "columnAccumulators"); the field is declared elsewhere, so the
 * reference is left untouched.
 */
protected void addRow(int width, CSVRecord row) {
    for (int i = 0; i < width; i++) {
        columnAcreplacedulators[i].add(row);
    }
}
19
Source : ElevateQueryComparer.java
with Apache License 2.0
from mitre
with Apache License 2.0
from mitre
/**
 * Loads a query/count CSV file (Excel dialect, BOM-tolerant, UTF-8) into
 * a QuerySet, warning when the same query string appears more than once
 * (later counts overwrite earlier ones via QuerySet.set).
 *
 * Fix: removed the unused local {@code Matcher uc}.
 *
 * @throws Exception on I/O or parse failure
 */
private static QuerySet loadQueries(Path file) throws Exception {
    QuerySet querySet = new QuerySet();
    try (InputStream is = Files.newInputStream(file)) {
        try (Reader reader = new InputStreamReader(new BOMInputStream(is), "UTF-8")) {
            Iterable<CSVRecord> records = CSVFormat.EXCEL.withFirstRecordAsHeader().parse(reader);
            for (CSVRecord record : records) {
                String q = record.get("query");
                Integer c = Integer.parseInt(record.get("count"));
                if (querySet.queries.containsKey(q)) {
                    LOG.warn("duplicate queries?! >" + q + "<");
                }
                querySet.set(q, c);
            }
        }
    }
    LOG.info("loaded " + querySet.queries.size() + " queries");
    return querySet;
}
19
Source : ElevateAnalysisEvaluator.java
with Apache License 2.0
from mitre
with Apache License 2.0
from mitre
/**
 * Loads queries (lowercased with Locale.US) with optional counts from a
 * CSV file; when a "count" column exists it is parsed per row, otherwise
 * each occurrence counts as 1. Counts for repeated queries are summed.
 *
 * @throws IOException on read or parse failure
 */
private Map<String, Integer> loadQueries(Path queryFile) throws IOException {
    Map<String, Integer> queries = new HashMap<>();
    try (Reader reader = new InputStreamReader(new BOMInputStream(Files.newInputStream(queryFile)), "UTF-8")) {
        Iterable<CSVRecord> records = CSVFormat.EXCEL.withFirstRecordAsHeader().parse(reader);
        // parse() actually returns the CSVParser, so the header can be sniffed.
        boolean hasCount = ((CSVParser) records).getHeaderMap().containsKey("count");
        for (CSVRecord r : records) {
            String query = r.get("query").toLowerCase(Locale.US);
            int cnt = hasCount ? Integer.parseInt(r.get("count")) : 1;
            // merge() replaces the original get / null-check / put sequence.
            queries.merge(query, cnt, Integer::sum);
        }
    }
    return queries;
}
19
Source : QueryLoader.java
with Apache License 2.0
from mitre
with Apache License 2.0
from mitre
/**
 * Collects the named query-string columns of the record into a new
 * QueryStrings container.
 */
private static QueryStrings getQueryStrings(Set<String> queryStringNames, CSVRecord record) {
    QueryStrings result = new QueryStrings();
    for (String columnName : queryStringNames) {
        result.addQueryString(columnName, record.get(columnName));
    }
    return result;
}
19
Source : QueryLoader.java
with Apache License 2.0
from mitre
with Apache License 2.0
from mitre
/**
 * Loads judgments from rows that carry no explicit query id: rows with
 * identical (querySet, queryStrings) pairs share one generated sequential
 * id, and their QueryInfo must agree or an exception is raised.
 *
 * NOTE(review): the method name ("loadJudments...") appears to be a typo
 * for "loadJudgments", and "DOreplacedENT_ID" looks scrape-garbled
 * (presumably DOCUMENT_ID); both are declared/used elsewhere, so they are
 * left untouched here.
 *
 * @return map from generated query id to its accumulated Judgments
 * @throws IllegalArgumentException if re-encountered query metadata
 *         conflicts with what was previously loaded
 */
private static Map<String, Judgments> loadJudmentsWithoutId(boolean hasJudgments, boolean hasQuerySet, boolean hasCount, Set<String> queryStringNames, Iterable<CSVRecord> records) {
    // queryset, Map<queryInfo.getId, Judgments>
    Map<String, Map<QueryStrings, Judgments>> queries = new HashMap<>();
    int uniqueJudgments = 0;
    for (CSVRecord record : records) {
        String querySet = (hasQuerySet) ? record.get(QUERY_SET) : QueryInfo.DEFAULT_QUERY_SET;
        QueryStrings queryStrings = getQueryStrings(queryStringNames, record);
        int queryCount = (hasCount) ? Integer.parseInt(record.get(COUNT)) : 1;
        Judgments judgments = null;
        if (queries.containsKey(querySet) && queries.get(querySet).containsKey(queryStrings)) {
            // Seen this query before: verify metadata consistency, reuse judgments.
            QueryInfo cachedQueryInfo = queries.get(querySet).get(queryStrings).getQueryInfo();
            QueryInfo newQueryInfo = new QueryInfo(cachedQueryInfo.getQueryId(), querySet, queryStrings, queryCount);
            if (!cachedQueryInfo.equals(newQueryInfo)) {
                throw new IllegalArgumentException("There's a mismatch between the previously loaded:" + cachedQueryInfo + "\nand the QueryInfo loaded for this row: " + newQueryInfo);
            }
            judgments = queries.get(querySet).get(queryStrings);
        } else {
            // New query: assign the next sequential id and register it.
            String queryId = Integer.toString(uniqueJudgments++);
            QueryInfo newQueryInfo = new QueryInfo(queryId, querySet, queryStrings, queryCount);
            judgments = new Judgments(newQueryInfo);
            if (queries.containsKey(querySet)) {
                queries.get(querySet).put(queryStrings, judgments);
            } else {
                Map<QueryStrings, Judgments> map = new HashMap<>();
                map.put(queryStrings, judgments);
                queries.put(querySet, map);
            }
        }
        if (hasJudgments) {
            String doreplacedentId = record.get(DOreplacedENT_ID);
            double relevanceScore = Double.parseDouble(record.get(RELEVANCE));
            judgments.addJudgment(doreplacedentId, relevanceScore);
        }
    }
    // Flatten the two-level map to queryId -> Judgments.
    Map<String, Judgments> ret = new HashMap<>();
    for (Map.Entry<String, Map<QueryStrings, Judgments>> e : queries.entrySet()) {
        for (Judgments j : e.getValue().values()) {
            ret.put(j.getQueryInfo().getQueryId(), j);
        }
    }
    return ret;
}
19
Source : QueryLoader.java
with Apache License 2.0
from mitre
with Apache License 2.0
from mitre
/**
 * Loads judgments from rows that carry an explicit query id column.
 * Rows sharing a query id must present identical query metadata, or an
 * exception is raised.
 *
 * NOTE(review): "DOreplacedENT_ID" looks scrape-garbled (presumably
 * DOCUMENT_ID); the constant is declared elsewhere, so it is left
 * untouched here.
 *
 * @return map from query id to its accumulated Judgments
 * @throws IllegalArgumentException on blank query ids or metadata conflicts
 * @throws SQLException declared by the original signature
 */
private static Map<String, Judgments> loadJudgmentsWithId(boolean hasJudgments, boolean hasQuerySet, boolean hasCount, Set<String> queryStringNames, Iterable<CSVRecord> records) throws SQLException {
    // queryId, judgments
    Map<String, Judgments> judgmentsMap = new HashMap<>();
    for (CSVRecord record : records) {
        String querySet = (hasQuerySet) ? record.get(QUERY_SET) : QueryInfo.DEFAULT_QUERY_SET;
        QueryStrings queryStrings = getQueryStrings(queryStringNames, record);
        int queryCount = (hasCount) ? Integer.parseInt(record.get(COUNT)) : 1;
        String queryId = record.get(QUERY_ID);
        if (StringUtils.isBlank(queryId)) {
            throw new IllegalArgumentException("If the csv has a '" + QUERY_ID + "' column, " + "there must be a non-empty value for every row");
        }
        QueryInfo newQueryInfo = new QueryInfo(queryId, querySet, queryStrings, queryCount);
        if (judgmentsMap.containsKey(queryId)) {
            // Re-encountered id: the metadata must match the first occurrence.
            QueryInfo cachedQueryInfo = judgmentsMap.get(queryId).getQueryInfo();
            if (!newQueryInfo.equals(cachedQueryInfo)) {
                throw new IllegalArgumentException("There's a mismatch between the previously loaded:" + cachedQueryInfo + "\nand the QueryInfo loaded for this row: " + newQueryInfo);
            }
        } else {
            judgmentsMap.put(queryId, new Judgments(newQueryInfo));
        }
        if (hasJudgments) {
            String doreplacedentId = record.get(DOreplacedENT_ID);
            double relevanceScore = Double.parseDouble(record.get(RELEVANCE));
            Judgments judgments = judgmentsMap.get(newQueryInfo.getQueryId());
            judgments.addJudgment(doreplacedentId, relevanceScore);
        }
    }
    return judgmentsMap;
}
19
Source : CompareAnalyzers.java
with Apache License 2.0
from mitre
/**
 * Reads the "query" column from a CSV file and, for each distinct query,
 * analyzes it with {@code baseField}, then re-analyzes each base token with
 * {@code filterField}, pairing each query with its filtered token lists.
 *
 * @param path        CSV file with a header containing a "query" column
 * @param searchClient client used to run the analysis chains
 * @param baseField   field whose analyzer produces the base tokens
 * @param filterField field whose analyzer post-filters each base token
 * @return one {@link QueryTokenPair} per distinct query
 * @throws IOException           if the file cannot be read
 * @throws SearchClientException if analysis fails
 */
private static List<QueryTokenPair> loadQueries(Path path, SearchClient searchClient, String baseField, String filterField) throws IOException, SearchClientException {
    // Use a set so duplicate query rows are analyzed only once.
    Set<String> queries = new HashSet<>();
    try (InputStream is = Files.newInputStream(path)) {
        // BOMInputStream strips a UTF-8 byte-order mark Excel often emits.
        try (Reader reader = new InputStreamReader(new BOMInputStream(is), "UTF-8")) {
            Iterable<CSVRecord> records = CSVFormat.EXCEL.withFirstRecordAsHeader().parse(reader);
            for (CSVRecord record : records) {
                queries.add(record.get("query"));
            }
        }
    }
    List<QueryTokenPair> queryTokenPairs = new ArrayList<>();
    for (String query : queries) {
        // NOTE(review): method names were garbled by the page scraper
        // ("replacedyze"); restored to analyze/baseAnalyzed.
        List<String> baseAnalyzed = searchClient.analyze(baseField, query);
        List<String> allFiltered = new ArrayList<>();
        for (String baseToken : baseAnalyzed) {
            List<String> filtered = searchClient.analyze(filterField, baseToken);
            if (filtered.isEmpty()) {
                // Keep positional alignment with the base tokens even when
                // the filter chain drops the token entirely.
                filtered.add("");
            }
            allFiltered.add(StringUtils.join(filtered, ", "));
        }
        queryTokenPairs.add(new QueryTokenPair(query, allFiltered));
    }
    return queryTokenPairs;
}
19
Source : AccuracyTest.java
with MIT License
from MighTguY
/**
 * Registers every comma-separated variant from column 2 of the CSV row as a
 * candidate mapping back to the target word in column 0.
 *
 * @param tpCandidates map receiving variant -> targetWord entries
 * @param csvRecord    row with the target word at index 0 and variants at index 2
 */
private void appendToList(Map<String, String> tpCandidates, CSVRecord csvRecord) {
    final String targetWord = csvRecord.get(0);
    for (final String variant : csvRecord.get(2).split(",")) {
        tpCandidates.put(variant, targetWord);
    }
}
19
Source : ApacheCommonsCsvDataRow.java
with GNU General Public License v3.0
from mecatran
public clreplaced ApacheCommonsCsvDataRow implements DataRow {
private CSVRecord record;
private ApacheCommonsCsvDataTable csvDataTable;
public ApacheCommonsCsvDataRow(ApacheCommonsCsvDataTable csvDataTable, CSVRecord record) {
this.csvDataTable = csvDataTable;
this.record = record;
}
@Override
public String getString(String field) {
if (record.getRecordNumber() == 1)
csvDataTable.recordReadField(field);
if (!record.isSet(field))
return null;
String ret = record.get(field);
if (ret == null)
return null;
if (ret.isEmpty())
return null;
return ret;
}
@Override
public DataObjectSourceInfo getSourceInfo() {
List<String> headerColumns = csvDataTable.getColumnHeaders();
List<String> fields = new ArrayList<>(headerColumns.size());
for (String column : headerColumns) {
try {
fields.add(record.get(column));
} catch (IllegalArgumentException e) {
fields.add(null);
}
}
return new DataObjectSourceInfoImpl(csvDataTable.getTableSourceInfo(), fields, csvDataTable.getCurrentLineNumber());
}
@Override
public DataObjectSourceRef getSourceRef() {
return new DataObjectSourceRef(csvDataTable.getTableName(), csvDataTable.getCurrentLineNumber());
}
@Override
public int getRecordCount() {
return record.size();
}
}
19
Source : CreateRestrictionsFromSnz.java
with GNU Affero General Public License v3.0
from matsim-org
/**
 * Read durations from a single input file for a day.
 *
 * Sums "durationSum" per activity type for records whose zip code is in
 * {@code zipCodes}, plus aggregate buckets: "notAtHome" (everything except
 * "home"), "notAtHomeExceptLeisureAndEdu", and "notAtHomeExceptEdu".
 */
static Object2DoubleMap<String> readDurations(File file, IntSet zipCodes) throws IOException {
    Object2DoubleMap<String> sums = new Object2DoubleOpenHashMap<>();
    try (BufferedReader reader = IOUtils.getBufferedReader(file.toString())) {
        CSVParser parser = CSVFormat.DEFAULT.withDelimiter(',').withFirstRecordAsHeader().parse(reader);
        for (CSVRecord record : parser) {
            String zipValue = record.get("zipCode");
            // Skip rows with a NULL zip code or one outside the target area.
            if (zipValue.contains("NULL"))
                continue;
            if (!zipCodes.contains(Integer.parseInt(zipValue)))
                continue;
            double duration = Double.parseDouble(record.get("durationSum"));
            String actType = record.get("actType");
            sums.mergeDouble(actType, duration, Double::sum);
            // The aggregate buckets only count time spent away from home.
            if (actType.equals("home"))
                continue;
            sums.mergeDouble("notAtHome", duration, Double::sum);
            if (!actType.equals("education") && !actType.equals("leisure")) {
                sums.mergeDouble("notAtHomeExceptLeisureAndEdu", duration, Double::sum);
            }
            if (!actType.equals("education")) {
                sums.mergeDouble("notAtHomeExceptEdu", duration, Double::sum);
            }
        }
    }
    return sums;
}
See More Examples