SequoiaDB v3 SCDD Certification Answers (for reference only)
Chapter 1
1. Create a table and insert data
stmt = conn.createStatement();
// Write SQL
String sql = "CREATE TABLE employee_quiz " +
"(" +
"empno INT AUTO_INCREMENT PRIMARY KEY, " +
"ename VARCHAR(128), " +
"age INT" +
")";
// Execute SQL
stmt.executeUpdate(sql);
stmt = conn.createStatement();
// Write SQL
String sql2 = "INSERT INTO employee_quiz VALUES (10001, 'George', 48), (10002, 'Bezalel', 21), (10006, 'Anneke', 19)";
// Execute SQL to insert data into employee_quiz
stmt.executeUpdate(sql2);
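All Chapter 1 snippets assume an already-open JDBC Connection conn and Statement stmt. A minimal sketch of obtaining them; the driver class, host, port, database name, and credentials below are assumptions for a SequoiaSQL-MySQL instance, not values from the quiz:

// Sketch only: how the conn/stmt used above might be obtained.
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

Class.forName("com.mysql.jdbc.Driver");      // MySQL 5.7 driver works against SequoiaSQL-MySQL
Connection conn = DriverManager.getConnection(
        "jdbc:mysql://sdbserver1:3306/quiz", // hypothetical host/port/database
        "sdbadmin", "sdbadmin");             // hypothetical credentials
Statement stmt = conn.createStatement();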
2. Update data
stmt = conn.createStatement();
// Write SQL
String sql = "UPDATE employee_quiz SET age = 34 WHERE empno = 10001";
// Execute SQL
stmt.executeUpdate(sql);
3. Delete data
stmt = conn.createStatement();
// Write SQL
String sql = "DELETE FROM employee_quiz WHERE empno = 10006";
// Execute SQL
stmt.executeUpdate(sql);
4. Create a view
stmt = conn.createStatement();
// Execute SQL to create or replace the view
stmt.executeUpdate("CREATE OR REPLACE VIEW employee_bak AS SELECT * FROM employee_quiz");
5. Insert data using functions
stmt = conn.createStatement();
// Write SQL: NOW() and PASSWORD() are evaluated server-side
String sql2 = "INSERT INTO user VALUES (1, NOW(), PASSWORD('123456'))";
// Execute SQL to insert data into user
stmt.executeUpdate(sql2);
Chapter 2
1. Create the collection and index
Sequoiadb db = new Sequoiadb("sdbserver1", 11810, "sdbadmin", "sdbadmin");
db.createCollectionSpace("company");
CollectionSpace cs = db.getCollectionSpace("company");
DBCollection cl = cs.createCollection("employee");
// Fields the index covers
BasicBSONObject column = new BasicBSONObject();
column.put("empno", 1);
// Create a unique index named idx_empno (isUnique = true, enforced = false)
cl.createIndex("idx_empno", column, true, false);
2. Insert data
List<BSONObject> records = new ArrayList<>();
BasicBSONObject r1 = new BasicBSONObject();
r1.put("empno", 10001);
r1.put("ename", "Georgi");
r1.put("age", 48);
records.add(r1);
BasicBSONObject r2 = new BasicBSONObject();
r2.put("empno", 10002);
r2.put("ename", "Bezalel");
r2.put("age", 21);
records.add(r2);
cl.insert(records);
3. Match a condition and update
// Set the match condition: empno equals 10001
BSONObject matcher = new BasicBSONObject();
BSONObject et = new BasicBSONObject();
et.put("$et", 10001);
matcher.put("empno", et);
// Set the modified value
BSONObject modifier = new BasicBSONObject();
BSONObject value = new BasicBSONObject();
value.put("age", 34);
modifier.put("$set", value);
// Execute the update operation
cl.update(matcher, modifier, null);
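To confirm the update took effect, the record can be read back with the SequoiaDB Java driver's query(matcher, selector, orderBy, hint) method; a minimal sketch reusing the matcher above:

// Read the record back to verify the update (sketch)
DBCursor cursor = cl.query(matcher, null, null, null);
while (cursor.hasNext()) {
    System.out.println(cursor.getNext()); // expect age == 34 for empno 10001
}
cursor.close();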
4. Insert array-type data
// Add attributes for the personnel record
BasicBSONObject record = new BasicBSONObject();
record.put("ename", "Mike");
record.put("empno", 10007);
// Add an array-type attribute, favorite: ["skiing", "swimming"]
BasicBSONList favorite = new BasicBSONList();
favorite.add("skiing");
favorite.add("swimming");
// Put the favorite array into the record
record.put("favorite", favorite);
// Insert this record into the employee collection
cl.insert(record);
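A sketch of querying by array element, assuming SequoiaDB matchers follow the MongoDB convention where a scalar condition matches any element of an array field:

// Sketch: a scalar match against an array field matches records
// whose array contains that element (assumption stated above).
BSONObject arrayMatcher = new BasicBSONObject("favorite", "skiing");
DBCursor cur = cl.query(arrayMatcher, null, null, null);
while (cur.hasNext()) {
    System.out.println(cur.getNext()); // expect Mike's record
}
cur.close();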
Chapter 3
1. Obtain the connection
AWSCredentials credentials = new BasicAWSCredentials(
"ABCDEFGHIJKLMNOPQRST",
"abcdefghijklmnopqrstuvwxyz0123456789ABCD");
String endPoint = "http://127.0.0.1:8002";
AwsClientBuilder.EndpointConfiguration endpointConfiguration =
new AwsClientBuilder.EndpointConfiguration(endPoint, null);
// Create the S3 connection object
AmazonS3 s3 = AmazonS3ClientBuilder.standard()
.withEndpointConfiguration(endpointConfiguration)
.withCredentials(new AWSStaticCredentialsProvider(credentials))
.build();
Create the bucket
s3.createBucket("novelbucket");
2. Everything below fits in one method: paste the code between the TODO and TODO END markers of the public void createCS method in LobQuiz.java, then run it.
Sequoiadb db = new Sequoiadb("sdbserver1", 11810, "", "");
CollectionSpace cs = db.createCollectionSpace("school");//Get collection object
BasicBSONObject mainOptions = new BasicBSONObject();
mainOptions.put("ShardingKey",new BasicBSONObject("date",1));
mainOptions.put("ShardingType", "range");//Claim the main collection
mainOptions.put("IsMainCL", true);
mainOptions.put("LobShardingKeyFormat","YYYYMMDD");
DBCollection maincl = cs.createCollection("student",mainOptions);
BasicBSONObject subOptions = new BasicBSONObject();//Set the partition key
subOptions.put("ShardingKey",new BasicBSONObject("sid",1));//Set the partition method
subOptions.put("ShardingType", "hash");
DBCollection subCL1 = cs.
createCollection("student_202004", subOptions);
DBCollection subCL2 = cs.
createCollection("student_202005", subOptions);
BasicBSONObject attachOptions = new BasicBSONObject();
attachOptions.put("LowBound", new BasicBSONObject("date", "20200401"));
attachOptions.put("UpBound", new BasicBSONObject("date", "20200501"));//Get the main table object
BasicBSONObject attachOptions2 = new BasicBSONObject();
attachOptions2.put("LowBound", new BasicBSONObject("date", "20200501"));
attachOptions2.put("UpBound", new BasicBSONObject("date", "20200601"));//Get the main table object
maincl.attachCollection("school.student_202004", attachOptions);
maincl.attachCollection("school.student_202005", attachOptions2);
SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd");
Date date1 = sdf.parse("2020-04-05");
ObjectId lobID = maincl.createLobID(date1);
DBLob lob = maincl.createLob(lobID);
FileInputStream fileInputStream = new FileInputStream("/home/sdbadmin/sequoiadb.txt" );//Write data to the Lob
lob.write(fileInputStream);//Close the Lob
lob.close();
DBCursor dbCursor = maincl.listLobs();
while(dbCursor.hasNext()){
BSONObject record = dbCursor.getNext();
System.out.println(record.toString());
}
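Reading the Lob back is symmetric to writing it: open it by ID on the main collection and stream it to an OutputStream. A minimal sketch; the local output path is an assumption:

// Sketch: read the Lob back by its ID and dump it to a local file
DBLob readLob = maincl.openLob(lobID);
FileOutputStream fileOutputStream = new FileOutputStream("/tmp/sequoiadb_copy.txt"); // hypothetical path
readLob.read(fileOutputStream);
readLob.close();
fileOutputStream.close();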
Chapter 4
MappingCollection.java
Connection connection2 = DriverManager.getConnection(
"jdbc:hive2://sdbserver1:10000/quiz",// Hive JDBC connection url
"sdbadmin",// Hive JDBC connection user name
""// Hive JDBC connection password (authentication is not enabled by default)
);
// Create Statement
Statement statement = connection2.createStatement();
// Drop the existing employee1 table
String dropTable =
"DROP TABLE IF EXISTS quiz.employee1";
// Execute the SQL statement of drop table
statement.execute(dropTable);
String dropTable2 =
"DROP TABLE IF EXISTS quiz.employee2";
// Execute the SQL statement of drop table
statement.execute(dropTable2);
// Create mapping tables for the employee1 and employee2 collections
String mappingTable =
"CREATE TABLE quiz.employee1 " +
"USING com.sequoiadb.spark " +
"OPTIONS( " +
"host 'sdbserver1:11810', " +
"collectionspace 'quiz', " +
"collection 'employee1', " +
"user 'sdbadmin'," +
"password 'sdbadmin'" +
")";
String mappingTable2 =
"CREATE TABLE quiz.employee2 " +
"USING com.sequoiadb.spark " +
"OPTIONS( " +
"host 'sdbserver1:11810', " +
"collectionspace 'quiz', " +
"collection 'employee2', " +
"user 'sdbadmin'," +
"password 'sdbadmin'" +
")";
// Execute the SQL statement of create mapping table
statement.execute(mappingTable);
statement.execute(mappingTable2);
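Once the mapping tables exist, they can be queried through the same Hive JDBC connection like any other table; a minimal sketch:

// Sketch: query a mapped table through the existing Hive JDBC statement
ResultSet resultSet = statement.executeQuery("SELECT COUNT(*) FROM quiz.employee1");
while (resultSet.next()) {
    System.out.println("employee1 rows: " + resultSet.getLong(1));
}
resultSet.close();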
CreateDatabase.java
try {
// Load Hive JDBC driver
Class.forName("org.apache.hive.jdbc.HiveDriver");
// Create Hive JDBC connection
Connection connection = DriverManager.getConnection(
url,// Hive JDBC connection url
"sdbadmin",// Hive JDBC connection user name
""// Hive JDBC connection password (authentication is not enabled by default)
);
Statement statement = null;
// Initialize ResultSet
ResultSet resultSet = null;
try {
// SQL statement of create database
String createDatabaseSQL = "CREATE DATABASE IF NOT EXISTS quiz";
// Create Statement
statement = connection.createStatement();
// Execute the SQL statement of create database
statement.execute(createDatabaseSQL);
statement = null;
// Initialize ResultSet
resultSet = null;
// TODO END
} catch (SQLException e) {
e.printStackTrace();
}
} catch (ClassNotFoundException | SQLException e) {
e.printStackTrace();
}
}
CountEmployee.java
Connection connection2 = DriverManager.getConnection(
"jdbc:hive2://sdbserver1:10000/quiz",// Hive JDBC connection url
"sdbadmin",// Hive JDBC connection user name
""// Hive JDBC connection password (authentication is not enabled by default)
);
// Create Statement
Statement statement = connection2.createStatement();
// Create the employee_count mapping table from an aggregation over employee1
String mappingTable2 =
"create table quiz.employee_count "+
"USING com.sequoiadb.spark " +
"OPTIONS( " +
"host 'sdbserver1:11810', " +
"collectionspace 'quiz', " +
"collection 'employee_count', " +
"user 'sdbadmin'," +
"password 'sdbadmin'" +
")"+
"as select sex,count(*) as num from quiz.employee1 group by sex";
statement.execute(mappingTable2);
BulkInsert.java
Connection connection2 = DriverManager.getConnection(
"jdbc:hive2://sdbserver1:10000/quiz",// Hive JDBC connection url
"sdbadmin",// Hive JDBC connection user name
""// Hive JDBC connection password (authentication is not enabled by default)
);
// Create Statement
Statement statement = connection2.createStatement();
// Insert all rows from employee2 into employee1
String inserts = "insert into employee1 select * from employee2";
// Execute the insert statement
statement.execute(inserts);
// TODO END
}
Chapter 5: paste the code below as-is.
package com.sequoiadb.lesson.flink.quiz;
import com.chaoc.flink.streaming.connectors.SequoiadbSink;
import com.chaoc.flink.streaming.connectors.SequoiadbSource;
import com.chaoc.flink.streaming.connectors.option.SequoiadbOption;
import com.sequoiadb.lesson.flink.common.FlinkQuizCheck;
import com.sequoiadb.lesson.flink.common.Init;
import org.apache.flink.api.common.JobExecutionResult;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.api.java.tuple.Tuple;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.api.java.tuple.Tuple3;
import org.apache.flink.streaming.api.datastream.*;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.windowing.WindowFunction;
import org.apache.flink.streaming.api.windowing.windows.GlobalWindow;
import org.apache.flink.util.Collector;
import org.bson.BSONObject;
import org.bson.BasicBSONObject;
import org.bson.types.BSONDecimal;
import java.util.Iterator;
/**
* Quiz assignment.
*/
public class AssignmentMain {
/**
* Add the data source here.
*
* Use SequoiadbSource to add a SequoiaDB collection to the execution environment as the data source.
* @param env the stream execution environment
* @return the data source stream
*/
private static DataStream<BSONObject> source(StreamExecutionEnvironment env) {
DataStreamSource<BSONObject> dataSource = null;
// TODO code 1
SequoiadbOption option = SequoiadbOption.bulider()
.host("localhost:11810")
.username("sdbadmin")
.password("sdbadmin")
.collectionSpaceName("VIRTUAL_BANK")
.collectionName("TRANSACTION_FLOW")
.build();
// Add a data source to the current environment (SequoiadbSource needs to build a stream through the time field "create_time")
dataSource = env.addSource(new SequoiadbSource(option, "create_time"));
// TODO END
return dataSource;
}
/**
* Implement the data type conversion here.
*
* Use the map operator to convert BSONObject into Tuple3<String, Double, Integer>.
* @param transData the original data set
* @return the converted stream
*/
private static DataStream<Tuple3<String, Double, Integer>> map(DataStream<BSONObject> transData) {
DataStream<Tuple3<String, Double, Integer>> resultData = null;
// TODO code 2
resultData = transData.map(new MapFunction<BSONObject,
Tuple3<String, Double, Integer>>() {
/**
* Execute on every event
* @param object Original event
* @return
* @throws Exception
*/
@Override
public Tuple3<String, Double, Integer> map(BSONObject object)
throws Exception {
// Extract the required fields
return Tuple3.of(object.get("trans_name").toString(), ((BSONDecimal) object.get("money")).toBigDecimal().doubleValue(), 1);
}
});
// TODO END
return resultData;
}
/**
* Group by the first field of the Tuple3 here.
*
* Use the keyBy operator to perform the grouping.
* @param moneyData the data set after type conversion
* @return the keyed stream
*/
private static KeyedStream<Tuple3<String, Double, Integer>, Tuple> keyBy(DataStream<Tuple3<String,
Double, Integer>> moneyData) {
KeyedStream<Tuple3<String, Double, Integer>, Tuple> resultData = null;
// TODO code 3
resultData = moneyData.keyBy(0);
// TODO END
return resultData;
}
/**
* Apply windowing here.
*
* Use the countWindow operator to open a sliding count window of size 100 with a slide of 50.
* @param keyedData the grouped data set
* @return the windowed stream
*/
private static WindowedStream<Tuple3<String, Double, Integer>, Tuple, GlobalWindow> countWindow(
KeyedStream<Tuple3<String, Double, Integer>, Tuple> keyedData) {
WindowedStream<Tuple3<String, Double, Integer>, Tuple, GlobalWindow> resultData = null;
// TODO code 4
resultData = keyedData.countWindow(100, 50);
// TODO END
return resultData;
}
/**
* Compute the total amount and count of each transaction type within each window.
*
* Use the apply operator to iterate over all events and accumulate the totals.
* @param countWindow the windowed data set
* @return the aggregated stream
*/
private static DataStream<Tuple2<String, Double>> reduce(WindowedStream<Tuple3<String, Double, Integer>,
Tuple, GlobalWindow> countWindow) {
DataStream<Tuple2<String, Double>> resultData = null;
// TODO code 5
resultData = countWindow.apply(new WindowFunction<Tuple3<String, Double, Integer>, Tuple2<String, Double>, Tuple, GlobalWindow>() {
/**
* Executed when the window fires; similar to the flatMap operator
* @param tuple Group field value. Since the subscript was used for grouping, the specific data type cannot be obtained, so the Tuple abstract representation is used here.
* @param globalWindow Global window reference
* @param iterable References to all data sets in the current window
* @param collector Result collector
* @throws Exception
*/
@Override
public void apply(Tuple tuple, GlobalWindow globalWindow, Iterable<Tuple3<String, Double, Integer>> iterable,
Collector<Tuple2<String, Double>> collector) throws Exception {
double sum = 0;
Iterator<Tuple3<String, Double, Integer>> iterator = iterable.iterator();
while (iterator.hasNext()) {
sum += iterator.next().f1;
}
collector.collect(Tuple2.of(tuple.getField(0), sum));
}
});
// TODO END
return resultData;
}
/**
* Convert the data format to BSONObject here.
*
* Use the map operator to convert Tuple2 into BSONObject.
* @param dataStream the aggregated result data set
* @return the BSON data stream
*/
private static DataStream<BSONObject> toBson(DataStream<Tuple2<String, Double>> dataStream) {
DataStream<BSONObject> bsonData = null;
// TODO code 6
bsonData = dataStream.map(new MapFunction<Tuple2<String, Double>, BSONObject>() {
@Override
public BSONObject map(Tuple2<String, Double> value) throws Exception {
BasicBSONObject obj = new BasicBSONObject();
obj.append("trans_name", value.f0);
obj.append("total_sum", value.f1);
return obj;
}
});
// TODO END
return bsonData;
}
/**
* Add a sink to the result DataStream here.
*
* Use SequoiadbSink to write the results into SequoiaDB.
* @param dataStream the data set converted to BSONObject
* @return the stream sink
*/
private static DataStreamSink<BSONObject> sink(DataStream<BSONObject> dataStream) {
DataStreamSink<BSONObject> streamSink = null;
// TODO code 7
SequoiadbOption option = SequoiadbOption.bulider()
.host("localhost:11810")
.username("sdbadmin")
.password("sdbadmin")
.collectionSpaceName("VIRTUAL_BANK")
.collectionName("ASSIGNMENT")
.build();
streamSink = dataStream.addSink(new SequoiadbSink(option));
// TODO END
return streamSink;
}
/**
* Program skeleton. Do not modify.
*
* @param args arguments
* @throws Exception
*/
public static void main(String[] args) throws Exception {
Init.initCollection(true, "ASSIGNMENT_CHECK", "ASSIGNMENT");
// Obtain the stream execution environment
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
// Read data in through the data source
DataStream<BSONObject> transSource = source(env);
// Convert the data with the map operator
DataStream<Tuple3<String, Double, Integer>> moneyData = map(transSource);
FlinkQuizCheck.metricTuple3(moneyData);
// Group the data
KeyedStream<Tuple3<String, Double, Integer>, Tuple> keyedData = keyBy(moneyData);
FlinkQuizCheck.metricKeyed(keyedData);
// Window the grouped data
WindowedStream<Tuple3<String, Double, Integer>, Tuple, GlobalWindow> countWindow = countWindow(keyedData);
FlinkQuizCheck.metricWindow(countWindow);
// Aggregate to compute the result
DataStream<Tuple2<String, Double>> sumData = reduce(countWindow);
// Convert the result to BSONObject
DataStream<BSONObject> bsonDataStream = toBson(sumData);
// Write the result into SequoiaDB
sink(bsonDataStream);
// Execute the streaming job
JobExecutionResult executionResult = env.execute("flink window");
FlinkQuizCheck.metricSave(executionResult);
}
}