MySQL Tutorial

Using ScalikeJDBC to work with MySQL data: the API


  • 1. Build a Maven project and add the pom.xml dependencies
  • 2. Create an application.conf file under the resources directory and configure it as follows
  • 3. Example: working with a MySQL database
  • 4. Initialize the connection directly in code, skipping step 2
  • 5. ScalikeJDBC API
    • 5.1 Query API
      • single queries
      • Returning the first row of a multi-row result
      • Returning results as a List
      • foreach operations
      • Setting the JDBC fetchSize
      • Implementing a custom extractor
      • Using ParameterBinder
    • 5.2 Update API
    • 5.3 Execute API
    • 5.4 Batch API

1. Build a Maven project and add the pom.xml dependencies
 <properties>
        <scala.version>2.11.8</scala.version>
        <scalikejdbc.version>3.3.2</scalikejdbc.version>
    </properties>

        <!-- 添加scalikejdbc依赖 -->
        <!-- https://mvnrepository.com/artifact/org.scalikejdbc/scalikejdbc -->
        <dependency>
            <groupId>org.scalikejdbc</groupId>
            <artifactId>scalikejdbc_2.11</artifactId>
            <version>${scalikejdbc.version}</version>
        </dependency>

        <dependency>
            <groupId>org.scalikejdbc</groupId>
            <artifactId>scalikejdbc-config_2.11</artifactId>
            <version>${scalikejdbc.version}</version>
        </dependency>
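
Note that the code in the following sections loads com.mysql.jdbc.Driver, but the pom above does not declare a MySQL JDBC driver, so one is assumed to be on the classpath as well. A typical addition would look like this (the version shown is only an example; pick one matching your MySQL server):

        <!-- MySQL JDBC driver (example version, adjust to your server) -->
        <dependency>
            <groupId>mysql</groupId>
            <artifactId>mysql-connector-java</artifactId>
            <version>5.1.47</version>
        </dependency>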
2. Create an application.conf file under the resources directory and configure it as follows
# JDBC settings (MySQL)
db.default.driver="com.mysql.jdbc.Driver"
db.default.url="jdbc:mysql://localhost:3306/spark?characterEncoding=utf-8"
db.default.user="root"
db.default.password="123456"

# Connection pool settings
db.default.poolInitialSize=10
db.default.poolMaxSize=20
db.default.poolConnectionTimeoutMillis=1000
db.default.poolValidationQuery="select 1 as one"
db.default.poolFactoryName="commons-dbcp2"

# A second, named connection (H2 example)
db.legacy.driver="org.h2.Driver"
db.legacy.url="jdbc:h2:file:./db/db2"
db.legacy.user="foo"
db.legacy.password="bar"

# PostgreSQL example (would replace the MySQL settings above)
# db.default.driver="org.postgresql.Driver"
# db.default.url="jdbc:postgresql://localhost:5432/scalikejdbc"
3. Example: working with a MySQL database
import scalikejdbc.{ConnectionPool, DB, SQL}
import scalikejdbc.config.DBs

case class User(id: Int, name: String, age: Int)

object ScalaLikeJdbc {
  def main(args: Array[String]): Unit = {
    // Load the JDBC driver
    classOf[com.mysql.jdbc.Driver]
    //    Class.forName("com.mysql.jdbc.Driver")
    // Parse application.conf and initialize the connection pool
    DBs.setup()
    //    createTable()
    //    println("Table user2 created")
    //    val userLists = List(User(1, "zhangsan", 18), User(2, "lisi", 20), User(3, "wangwu", 35))
    //    insert(userLists)
    //    println("Batch insert finished")
    //    println(selectAll())
    //    println(selectByID(2))
    //    updateByID(2,60)
    //    println(selectByID(2))

    deleteByID(2)
    println(selectAll())
    DBs.close()
  }

  def createTable(): Unit = {
    DB.autoCommit { implicit session =>
      SQL("create table user2(\nid int not null auto_increment,\nname varchar(100) not null,\nage int,\nprimary key ( id )\n)engine=innodb default charset=utf8; ")
        .execute.apply()
    }
  }

  def insert(users: List[User]): Unit = {
    DB.localTx { implicit session =>
      for (user <- users) {
        SQL("insert into user2(id,name,age) values(?,?,?)")
          .bind(user.id, user.name, user.age)
          .update().apply()
      }

    }
  }

  // Query all rows
  def selectAll(): List[User] = {
    val list: List[User] = DB.readOnly {
      implicit session =>
        SQL("SELECT * from user2").map(rs => User(rs.int(1), rs.string(2), rs.int(3))).list().apply()
    }
    list
  }

  def selectByID(id: Int): Option[User] = {
    val list: Option[User] = DB.readOnly {
      implicit session =>
        SQL(s"select id,name,age from user2 where id = ${id}").map(rs => User(rs.int(1), rs.string(2), rs.int(3))).single.apply()
    }
    list
  }

  def updateByID(id: Int, age: Int): Unit = {
    DB.localTx {
      implicit session =>
        SQL(s"update user2 set age = ?  where id = ${id}").bind(age).update().apply()
    }
  }

  def deleteByID(id: Int): Unit = {
    DB.localTx {
      implicit session =>
        SQL(s"delete from user2 where id = ${id}").update().apply()
    }
  }
}
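
Note that selectByID, updateByID, and deleteByID splice the id into the SQL text via Scala string interpolation inside SQL(s"..."), so it is not sent as a bound JDBC parameter. A sketch of the same query using the sql"..." interpolator, where every ${...} becomes a ? placeholder (this assumes import scalikejdbc._ instead of the selective import used above):

  // Same query as selectByID, but with id bound as a JDBC parameter
  def selectByIdSafely(id: Int): Option[User] = {
    DB.readOnly { implicit session =>
      sql"select id, name, age from user2 where id = ${id}"
        .map(rs => User(rs.int("id"), rs.string("name"), rs.int("age")))
        .single.apply()
    }
  }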
4. Initialize the connection directly in code, skipping step 2
import scalikejdbc.config._
import scalikejdbc._
import scala.collection.mutable.ListBuffer

object ScalikejdbcApp {
  Class.forName("com.mysql.jdbc.Driver")
  ConnectionPool.singleton("jdbc:mysql://192.168.xx.xx:3306/spark","root","123456")
  
  implicit val session = AutoSession

  def main(args: Array[String]): Unit = {
    create
    //insert(1,"ruoruo")
    //highlevelinsert(List(3,4),List("JJ","星星")) // ids need not be consecutive, but a duplicate id raises an error
    //update(4,"xingxing")
    println(select())
    delete()
    ConnectionPool.close() // close the connection pool when done
  }
  
  def create = {
    implicit val session = AutoSession
    sql"""
       CREATE TABLE IF NOT EXISTS Person(
         id int PRIMARY KEY NOT NULL auto_increment,
         name varchar(64),
         created_time timestamp not null DEFAULT current_timestamp
      )ENGINE=InnoDB  DEFAULT CHARSET=utf8 AUTO_INCREMENT=1
      """.execute.apply()
    // If a column should never be NULL, declare it NOT NULL; inserting NULL into it then raises an error.
    // PRIMARY KEY marks the column(s) as the primary key; several columns can be listed, separated by commas.
    // AUTO_INCREMENT makes the column auto-increment (usually the primary key), adding 1 automatically.
    // ENGINE selects the storage engine and CHARSET the character set.
  }
  
  // Insert a single row
  def insert(id: Int, name: String): Unit = {
    implicit val session = AutoSession
    sql"""insert into Person(id,name) values (${id},${name})""".update.apply()
  }

  // Insert two rows in one statement
  def highlevelinsert(id: List[Int], name: List[String]) = {
    sql"""insert into Person(id,name) values (${id(0)},${name(0)}),(${id(1)},${name(1)})""".update().apply()
    println(s"${id(0)},${name(0)}") // e.g. 3,JJ
  }
  
  // Update a row
  def update(id: Int, name: String) = {
    implicit val session = AutoSession
    sql"update Person set name = ${name} where id = ${id}".update().apply()
  }
  
  // Query data
  def select() = {
    implicit val session = AutoSession
    //sql"select * from Person".map(x => x.string("name")).list().apply() // List(ruoruo, J?, xingxing)
    //sql"select * from Person where Person.id=4".map(x => x.string("name")).single().apply() // Some(xingxing)
    //sql"select * from Person where Person.id=4".map(x => x.string("name")).single().apply().get // xingxing
    sql"select * from Person".map(x => (x.string("id"), x.string("name"))).list().apply() // List((1,ruoruo), (3,J?), (4,xingxing))
  }
  // Delete data
  def delete() = {
    //sql"delete from Person where Person.id=3".update().apply() // delete the row with id=3 (name=J总)
    //sql"delete from Person".update().apply()                   // delete every row in Person but keep the table
    sql"drop table if exists Person".update().apply()            // drop the whole table
  }
}
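
All of the methods above run on AutoSession, so each statement commits on its own. When several statements should succeed or fail together, they can be wrapped in DB.localTx (already used in section 3). A small sketch against the same Person table, with hypothetical parameters:

  // Insert a row and rename it inside one transaction:
  // if either statement throws, the whole transaction is rolled back.
  def insertAndRename(id: Int, name: String, newName: String): Unit = {
    DB.localTx { implicit session =>
      sql"insert into Person(id, name) values (${id}, ${name})".update.apply()
      sql"update Person set name = ${newName} where id = ${id}".update.apply()
    }
  }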
5. ScalikeJDBC API

5.1 Query API

ScalikeJDBC offers several query APIs, including single, first, list, and foreach; internally they are all implemented on top of java.sql.PreparedStatement#executeQuery().

single queries

The single function returns at most one matching row, wrapped in an Option. If the query matches more than one row, an exception is thrown at runtime. Using single looks like this:

import scalikejdbc._
 
val id = 123
 
// simple example
val name: Option[String] = DB readOnly { implicit session =>
  sql"select name from emp where id = ${id}".map(rs => rs.string("name")).single.apply()
}
 
// defined mapper as a function
val nameOnly = (rs: WrappedResultSet) => rs.string("name")
val name: Option[String] = DB readOnly { implicit session =>
  sql"select name from emp where id = ${id}".map(nameOnly).single.apply()
}
 
// define a class to map the result
case class Emp(id: String, name: String)
val emp: Option[Emp] = DB readOnly { implicit session =>
  sql"select id, name from emp where id = ${id}"
    .map(rs => Emp(rs.string("id"), rs.string("name"))).single.apply()
}
 
// QueryDSL
object Emp extends SQLSyntaxSupport[Emp] {
  def apply(e: ResultName[Emp])(rs: WrappedResultSet): Emp = new Emp(id = rs.get(e.id), name = rs.get(e.name))
}
val e = Emp.syntax("e")
val emp: Option[Emp] = DB readOnly { implicit session =>
  withSQL { select.from(Emp as e).where.eq(e.id, id) }.map(Emp(e.resultName)).single.apply()
}

Returning the first row of a multi-row result

The first function returns only the first row of a multi-row result, and its return type is likewise wrapped in an Option.

val name: Option[String] = DB readOnly { implicit session =>
  sql"select name from emp".map(rs => rs.string("name")).first.apply()
}
 
val e = Emp.syntax("e")
val name: Option[String] = DB readOnly { implicit session =>
  withSQL { select(e.result.name).from(Emp as e) }.map(_.string(e.name)).first.apply()
}

Returning results as a List

The list function collects all matching rows into a scala.collection.immutable.List:

val name: List[String] = DB readOnly { implicit session =>
  sql"select name from emp".map(rs => rs.string("name")).list.apply()
}
 
val e = Emp.syntax("e")
val name: List[String] = DB readOnly { implicit session =>
  withSQL { select(e.result.name).from(Emp as e) }.map(_.string(e.name)).list.apply()
}

foreach operations

The foreach function lets you perform side-effecting work on each row as it is iterated; it is particularly useful when the ResultSet contains a very large number of rows.

DB readOnly { implicit session =>
  sql"select name from emp".foreach { rs => 
    out.write(rs.string("name")) 
  }
}
 
val e = Emp.syntax("e")
DB readOnly { implicit session =>
  withSQL { select(e.name).from(Emp as e) }.foreach { rs => 
    out.write(rs.string(e.name)) 
  }
}

Setting the JDBC fetchSize

By default (fetchSize=0), the PostgreSQL JDBC driver fetches the entire result set at once, which can cause memory problems.

Since ScalikeJDBC 2.0.5 you can set the JDBC fetchSize:

val e = Emp.syntax("e")
DB readOnly { implicit session =>
  sql"select name from emp"
    .fetchSize(1000)
    .foreach { rs => out.write(rs.string("name")) }
}

Or set the fetchSize directly on the scalikejdbc.DBSession:

val (e, c) = (Emp.syntax("e"), Cmp.syntax("c"))
 
DB readOnly { implicit session =>
  session.fetchSize(1000)
 
  withSQL { select(e.name).from(Emp as e) }.foreach { rs => 
    out.write(rs.string(e.name)) 
  }
  withSQL { select(c.name).from(Cmp as c) }.foreach { rs => 
    out.write(rs.string(c.name)) 
  }
}

Implementing a custom extractor

You can also write your own extractor that turns a WrappedResultSet into any type you like, for example a Map from column label to value:

def toMap(rs: WrappedResultSet): Map[String, Any] =  {
  (1 to rs.metaData.getColumnCount).foldLeft(Map[String, Any]()) { (result, i) =>
    val label = rs.metaData.getColumnLabel(i)
    Some(rs.any(label)).map { nullableValue => result + (label -> nullableValue) }.getOrElse(result)
  }
}
 
sql"select * from emp".map(rs => toMap(rs)).single.apply()

Using ParameterBinder

ParameterBinder[A] lets you customize how a value is bound to a PreparedStatement in ScalikeJDBC. The example below shows how to bind an InputStream using PreparedStatement#setBinaryStream:

sql"create table blob_example (id bigint, data blob)").execute.apply()
 
val bytes = scala.Array[Byte](1, 2, 3, 4, 5, 6, 7)
 
val bytesBinder = ParameterBinder[InputStream](
  value = new ByteArrayInputStream(bytes),
  binder = (stmt: PreparedStatement, idx: Int) => stmt.setBinaryStream(idx, in, bytes.length)
)
 
sql"insert into blob_example (data) values (${bytesBinder})").update.apply()

5.2 Update API

update ultimately runs java.sql.PreparedStatement#executeUpdate().

import scalikejdbc._
 
DB localTx { implicit session =>
  sql"""insert into emp (id, name, created_at) values (${id}, ${name}, ${DateTime.now})"""
    .update.apply()
  val id = sql"insert into emp (name, created_at) values (${name}, current_timestamp)"
    .updateAndReturnGeneratedKey.apply()
  sql"update emp set name = ${newName} where id = ${id}".update.apply()
  sql"delete emp where id = ${id}".update.apply()
}
 
val column = Emp.column
DB localTx { implicit s =>
  withSQL {
    insert.into(Emp).namedValues(
      column.id -> id,
      column.name -> name,
      column.createdAt -> DateTime.now)
   }.update.apply()
 
  val id: Long = withSQL {
    insert.into(Emp).namedValues(column.name -> name, column.createdAt -> sqls.currentTimestamp)
  }.updateAndReturnGeneratedKey.apply()
 
  withSQL { update(Emp).set(column.name -> newName).where.eq(column.id, id) }.update.apply()
  withSQL { delete.from(Emp).where.eq(column.id, id) }.update.apply()
}

5.3 Execute API

execute ultimately runs java.sql.PreparedStatement#execute().

DB autoCommit { implicit session =>
  sql"create table emp (id integer primary key, name varchar(30))".execute.apply()
}
 
// QueryDSL doesn't support DDL yet.

5.4 Batch API

batch and batchByName ultimately run java.sql.PreparedStatement#executeBatch().

import scalikejdbc._
 
DB localTx { implicit session =>
  val batchParams: Seq[Seq[Any]] = (2001 to 3000).map(i => Seq(i, "name" + i))
  sql"insert into emp (id, name) values (?, ?)".batch(batchParams: _*).apply()
}
 
DB localTx { implicit session =>
  sql"insert into emp (id, name) values ({id}, {name})"
    .batchByName(Seq(Seq('id -> 1, 'name -> "Alice"), Seq('id -> 2, 'name -> "Bob")):_*)
    .apply()
}
 
val column = Emp.column
DB localTx { implicit session =>
  val batchParams: Seq[Seq[Any]] = (2001 to 3000).map(i => Seq(i, "name" + i))
  withSQL {
    insert.into(Emp).namedValues(column.id -> sqls.?, column.name -> sqls.?)
  }.batch(batchParams: _*).apply()
}