--- Initialize the project

2025-09-19 16:14:08 +08:00
parent 902d3d7e3b
commit afee7c03ac
767 changed files with 75809 additions and 82 deletions

View File

@@ -0,0 +1,47 @@
package tech.powerjob.server.persistence;
import lombok.Data;
import lombok.NoArgsConstructor;
import org.springframework.data.domain.Page;
import java.io.Serializable;
import java.util.List;
/**
* Pagination result object
*
* @author tjq
* @since 2020/4/12
*/
@Data
@NoArgsConstructor
public class PageResult<T> implements Serializable {
/**
 * current page number
 */
private int index;
/**
 * page size
 */
private int pageSize;
/**
 * total number of pages
 */
private int totalPages;
/**
 * total number of items
 */
private long totalItems;
/**
 * page data
 */
private List<T> data;
public PageResult(Page<?> page) {
index = page.getNumber();
pageSize = page.getSize();
totalPages = page.getTotalPages();
totalItems = page.getTotalElements();
}
}
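A minimal usage sketch (the enclosing service method is hypothetical): the constructor copies only the paging metadata from a Spring Data Page, so callers must map and set the content themselves.

public PageResult<AppInfoDO> listApps(AppInfoRepository appInfoRepository) {
    Page<AppInfoDO> page = appInfoRepository.findByAppNameLike("%pay%", PageRequest.of(0, 10));
    PageResult<AppInfoDO> result = new PageResult<>(page);
    // data is NOT populated by the constructor; set it explicitly
    result.setData(page.getContent());
    return result;
}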

View File

@@ -0,0 +1,128 @@
package tech.powerjob.server.persistence;
import com.alibaba.fastjson.JSONArray;
import com.google.common.collect.Lists;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import org.springframework.data.domain.PageRequest;
import org.springframework.data.domain.Pageable;
import org.springframework.data.domain.Sort;
import org.springframework.data.jpa.domain.Specification;
import tech.powerjob.common.PowerQuery;
import tech.powerjob.common.exception.PowerJobException;
import tech.powerjob.common.request.query.PowerPageQuery;
import javax.persistence.criteria.Predicate;
import java.lang.reflect.Field;
import java.util.List;
/**
* Automatically converts a PowerQuery into a JPA Specification
*
* @author tjq
* @since 2021/1/15
*/
@Slf4j
@SuppressWarnings({"unchecked", "rawtypes"})
public class QueryConvertUtils {
public static <T> Specification<T> toSpecification(PowerQuery powerQuery) {
return (Specification<T>) (root, query, cb) -> {
List<Predicate> predicates = Lists.newLinkedList();
Field[] fields = powerQuery.getClass().getDeclaredFields();
try {
for (Field field : fields) {
field.setAccessible(true);
String fieldName = field.getName();
Object fieldValue = field.get(powerQuery);
if (fieldValue == null) {
continue;
}
if (fieldName.endsWith(PowerQuery.EQUAL)) {
String colName = StringUtils.substringBeforeLast(fieldName, PowerQuery.EQUAL);
predicates.add(cb.equal(root.get(colName), fieldValue));
} else if (fieldName.endsWith(PowerQuery.NOT_EQUAL)) {
String colName = StringUtils.substringBeforeLast(fieldName, PowerQuery.NOT_EQUAL);
predicates.add(cb.notEqual(root.get(colName), fieldValue));
} else if (fieldName.endsWith(PowerQuery.LIKE)) {
String colName = StringUtils.substringBeforeLast(fieldName, PowerQuery.LIKE);
predicates.add(cb.like(root.get(colName), convertLikeParams(fieldValue)));
} else if (fieldName.endsWith(PowerQuery.NOT_LIKE)) {
String colName = StringUtils.substringBeforeLast(fieldName, PowerQuery.NOT_LIKE);
predicates.add(cb.notLike(root.get(colName), convertLikeParams(fieldValue)));
} else if (fieldName.endsWith(PowerQuery.LESS_THAN)) {
String colName = StringUtils.substringBeforeLast(fieldName, PowerQuery.LESS_THAN);
predicates.add(cb.lessThan(root.get(colName), (Comparable)fieldValue));
} else if (fieldName.endsWith(PowerQuery.GREATER_THAN)) {
String colName = StringUtils.substringBeforeLast(fieldName, PowerQuery.GREATER_THAN);
predicates.add(cb.greaterThan(root.get(colName), (Comparable)fieldValue));
} else if (fieldName.endsWith(PowerQuery.LESS_THAN_EQUAL)) {
String colName = StringUtils.substringBeforeLast(fieldName, PowerQuery.LESS_THAN_EQUAL);
predicates.add(cb.lessThanOrEqualTo(root.get(colName), (Comparable)fieldValue));
} else if (fieldName.endsWith(PowerQuery.GREATER_THAN_EQUAL)) {
String colName = StringUtils.substringBeforeLast(fieldName, PowerQuery.GREATER_THAN_EQUAL);
predicates.add(cb.greaterThanOrEqualTo(root.get(colName), (Comparable)fieldValue));
} else if (fieldName.endsWith(PowerQuery.IN)) {
String colName = StringUtils.substringBeforeLast(fieldName, PowerQuery.IN);
predicates.add(root.get(colName).in(convertInParams(fieldValue)));
} else if (fieldName.endsWith(PowerQuery.NOT_IN)) {
String colName = StringUtils.substringBeforeLast(fieldName, PowerQuery.NOT_IN);
predicates.add(cb.not(root.get(colName).in(convertInParams(fieldValue))));
} else if (fieldName.endsWith(PowerQuery.IS_NULL)) {
String colName = StringUtils.substringBeforeLast(fieldName, PowerQuery.IS_NULL);
predicates.add(cb.isNull(root.get(colName)));
} else if (fieldName.endsWith(PowerQuery.IS_NOT_NULL)) {
String colName = StringUtils.substringBeforeLast(fieldName, PowerQuery.IS_NOT_NULL);
predicates.add(cb.isNotNull(root.get(colName)));
}
}
} catch (Exception e) {
log.warn("[QueryConvertUtils] convert failed for query: {}", query, e);
throw new PowerJobException("failed to convert the query object, maybe you should redesign your query object!");
}
if (powerQuery.getAppIdEq() != null) {
predicates.add(cb.equal(root.get("appId"), powerQuery.getAppIdEq()));
}
return query.where(predicates.toArray(new Predicate[0])).getRestriction();
};
}
public static Pageable toPageable(PowerPageQuery powerPageQuery) {
Sort sorter = null;
String sortBy = powerPageQuery.getSortBy();
if (StringUtils.isNoneEmpty(sortBy)) {
sorter = Sort.by(sortBy);
if (powerPageQuery.isAsc()) {
sorter = sorter.ascending();
} else {
sorter = sorter.descending();
}
}
if (sorter == null) {
return PageRequest.of(powerPageQuery.getIndex(), powerPageQuery.getPageSize());
}
return PageRequest.of(powerPageQuery.getIndex(), powerPageQuery.getPageSize(), sorter);
}
public static String convertLikeParams(Object o) {
String s = (String) o;
if (!s.startsWith("%")) {
s = "%" + s;
}
if (!s.endsWith("%")) {
s = s + "%";
}
return s;
}
private static Object[] convertInParams(Object o) {
// FastJSON, the GOAT!
return JSONArray.parseArray(JSONArray.toJSONString(o)).toArray();
}
}
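A hedged usage sketch: the query class below is hypothetical and assumes the PowerQuery suffix constants are the literal strings "Eq", "Like" and "In" (consistent with the appIdEq special case above). Column names are derived by stripping the suffix from each non-null field.

public class JobInfoQuery extends PowerQuery { // PowerQuery itself already carries appIdEq
    private String jobNameLike;   // -> where jobName like ... (convertLikeParams adds the % wildcards)
    private Integer statusEq;     // -> where status = ...
    private List<Integer> typeIn; // -> where type in (...)
}
// Specification<JobInfoDO> spec = QueryConvertUtils.toSpecification(query);
// jobInfoRepository.findAll(spec, QueryConvertUtils.toPageable(pageQuery));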

View File

@@ -0,0 +1,37 @@
package tech.powerjob.server.persistence;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
/**
* Paged text content
*
* @author tjq
* @since 2020/5/3
*/
@Data
@NoArgsConstructor
@AllArgsConstructor
public class StringPage {
/**
 * current page number
 */
private long index;
/**
 * total number of pages
 */
private long totalPages;
/**
 * text content
 */
private String data;
public static StringPage simple(String data) {
StringPage sp = new StringPage();
sp.index = 0;
sp.totalPages = 1;
sp.data = data;
return sp;
}
}

View File

@@ -0,0 +1,78 @@
package tech.powerjob.server.persistence.config;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.boot.autoconfigure.orm.jpa.HibernateProperties;
import org.springframework.boot.autoconfigure.orm.jpa.HibernateSettings;
import org.springframework.boot.autoconfigure.orm.jpa.JpaProperties;
import org.springframework.boot.orm.jpa.EntityManagerFactoryBuilder;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.DependsOn;
import org.springframework.data.jpa.repository.config.EnableJpaRepositories;
import org.springframework.orm.jpa.JpaTransactionManager;
import org.springframework.orm.jpa.LocalContainerEntityManagerFactoryBean;
import org.springframework.transaction.PlatformTransactionManager;
import org.springframework.transaction.TransactionDefinition;
import org.springframework.transaction.annotation.EnableTransactionManagement;
import org.springframework.transaction.support.TransactionTemplate;
import javax.annotation.Resource;
import javax.sql.DataSource;
import java.util.Map;
import java.util.Objects;
/**
* Local H2 database configuration
*
* @author tjq
* @since 2020/4/27
*/
@Configuration
@EnableTransactionManagement
@EnableJpaRepositories(
// repository base package
basePackages = LocalJpaConfig.LOCAL_PACKAGES,
// entity manager factory bean name
entityManagerFactoryRef = "localEntityManagerFactory",
// transaction manager bean name
transactionManagerRef = "localTransactionManager"
)
public class LocalJpaConfig {
public static final String LOCAL_PACKAGES = "tech.powerjob.server.persistence.local";
private static Map<String, Object> genDatasourceProperties() {
JpaProperties jpaProperties = new JpaProperties();
jpaProperties.setOpenInView(false);
jpaProperties.setShowSql(false);
HibernateProperties hibernateProperties = new HibernateProperties();
// Drop all data on every startup: after a restart the original instances have already been failed over to another server, so the old local log data is useless anyway
hibernateProperties.setDdlAuto("create");
return hibernateProperties.determineHibernateProperties(jpaProperties.getProperties(), new HibernateSettings());
}
@Bean(name = "localEntityManagerFactory")
public LocalContainerEntityManagerFactoryBean initLocalEntityManagerFactory(@Qualifier("omsLocalDatasource") DataSource omsLocalDatasource, EntityManagerFactoryBuilder builder) {
return builder
.dataSource(omsLocalDatasource)
.properties(genDatasourceProperties())
.packages(LOCAL_PACKAGES)
.persistenceUnit("localPersistenceUnit")
.build();
}
@Bean(name = "localTransactionManager")
public PlatformTransactionManager initLocalTransactionManager(@Qualifier("localEntityManagerFactory") LocalContainerEntityManagerFactoryBean localContainerEntityManagerFactoryBean) {
return new JpaTransactionManager(Objects.requireNonNull(localContainerEntityManagerFactoryBean.getObject()));
}
@Bean(name = "localTransactionTemplate")
public TransactionTemplate initTransactionTemplate(@Qualifier("localTransactionManager") PlatformTransactionManager ptm) {
TransactionTemplate tt = new TransactionTemplate(ptm);
// set the isolation level
tt.setIsolationLevel(TransactionDefinition.ISOLATION_DEFAULT);
return tt;
}
}
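A hedged usage sketch (the injecting service is hypothetical): with two transaction managers in the context, code touching the local H2 store must reference the local beans by name rather than the @Primary remote ones.

@Resource(name = "localTransactionTemplate")
private TransactionTemplate localTransactionTemplate;

public void clearInstanceLogs(LocalInstanceLogRepository repository, Long instanceId) {
    // runs inside a transaction bound to localTransactionManager, not the remote one
    localTransactionTemplate.execute(status -> repository.deleteByInstanceId(instanceId));
}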

View File

@@ -0,0 +1,56 @@
package tech.powerjob.server.persistence.config;
import tech.powerjob.server.common.utils.OmsFileUtils;
import com.zaxxer.hikari.HikariConfig;
import com.zaxxer.hikari.HikariDataSource;
import org.apache.commons.io.FileUtils;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.boot.jdbc.DataSourceBuilder;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Primary;
import javax.sql.DataSource;
import java.io.File;
/**
* Multi-datasource configuration
*
* @author tjq
* @since 2020/4/27
*/
@Configuration
public class MultiDatasourceConfig {
private static final String H2_DRIVER_CLASS_NAME = "org.h2.Driver";
private static final String H2_JDBC_URL_PATTERN = "jdbc:h2:file:%spowerjob_server_db";
private static final int H2_MIN_SIZE = 4;
private static final int H2_MAX_ACTIVE_SIZE = 10;
@Primary
@Bean("omsRemoteDatasource")
@ConfigurationProperties(prefix = "spring.datasource.core")
public DataSource initOmsCoreDatasource() {
return DataSourceBuilder.create().build();
}
@Bean("omsLocalDatasource")
public DataSource initOmsLocalDatasource() {
String h2Path = OmsFileUtils.genH2WorkPath();
HikariConfig config = new HikariConfig();
config.setDriverClassName(H2_DRIVER_CLASS_NAME);
config.setJdbcUrl(String.format(H2_JDBC_URL_PATTERN, h2Path));
config.setAutoCommit(true);
// minimum number of idle connections in the pool
config.setMinimumIdle(H2_MIN_SIZE);
// maximum pool size
config.setMaximumPoolSize(H2_MAX_ACTIVE_SIZE);
// delete the H2 files when the JVM exits
try {
FileUtils.forceDeleteOnExit(new File(h2Path));
} catch (Exception ignore) {
}
return new HikariDataSource(config);
}
}

View File

@@ -0,0 +1,66 @@
package tech.powerjob.server.persistence.config;
import com.google.common.collect.Maps;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.stereotype.Component;
import java.util.*;
/**
* Multi-datasource configuration properties
*
* @author Kung Yao
* @since 2020/4/27
*/
@Component
@ConfigurationProperties("spring.datasource")
public class MultiDatasourceProperties {
private DataSourceProperties remote = new DataSourceProperties();
private DataSourceProperties local = new DataSourceProperties();
public static class DataSourceProperties {
private HibernateProperties hibernate = new HibernateProperties();
public void setHibernate(HibernateProperties hibernate) {
this.hibernate = hibernate;
}
public HibernateProperties getHibernate() {
return hibernate;
}
}
public static class HibernateProperties {
private Map<String, String> properties = Maps.newHashMap();
public void setProperties(Map<String, String> properties) {
this.properties = properties;
}
public Map<String, String> getProperties() {
return properties;
}
}
public void setLocal(DataSourceProperties local) {
this.local = local;
}
public void setRemote(DataSourceProperties remote) {
this.remote = remote;
}
public DataSourceProperties getLocal() {
return local;
}
public DataSourceProperties getRemote() {
return remote;
}
}

View File

@@ -0,0 +1,47 @@
package tech.powerjob.server.persistence.config;
import tech.powerjob.server.common.PowerJobServerConfigKey;
import tech.powerjob.server.common.utils.PropertyUtils;
import org.hibernate.boot.model.naming.Identifier;
import org.hibernate.engine.jdbc.env.spi.JdbcEnvironment;
import org.springframework.boot.orm.jpa.hibernate.SpringPhysicalNamingStrategy;
import org.apache.commons.lang3.StringUtils;
import java.io.Serializable;
/**
* Custom table prefix, driven by the config key oms.table-prefix; when it is not configured, no prefix is added.
* Reference implementation: {@link org.springframework.boot.orm.jpa.hibernate.SpringPhysicalNamingStrategy}
* <p>
* 1. Extends the physical naming strategy to implement the custom table prefix;
* </p>
* <p>
* 2. @Query(nativeQuery = true) and its SQL must be rewritten to use entity and property names instead of table and column names.
* </p>
*
* @author songyinyin
* @since 2020/7/18
*/
public class PowerJobPhysicalNamingStrategy extends SpringPhysicalNamingStrategy implements Serializable {
/**
 * Maps the physical table name, e.g. strips the DO suffix from the AppInfoDO entity and prepends the table prefix
 *
 * @param name entity name
 * @param jdbcEnvironment JDBC environment
 * @return mapped physical table name
 */
@Override
public Identifier toPhysicalTableName(Identifier name, JdbcEnvironment jdbcEnvironment) {
String tablePrefix = PropertyUtils.getProperties().getProperty(PowerJobServerConfigKey.TABLE_PREFIX);
String text = name.getText();
String noDOText = StringUtils.endsWithIgnoreCase(text, "do") ? text.substring(0, text.length() - 2) : text;
String newText = StringUtils.isNotEmpty(tablePrefix) ? tablePrefix + noDOText : noDOText;
return super.toPhysicalTableName(new Identifier(newText, name.isQuoted()), jdbcEnvironment);
}
}
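Illustrative mappings under this strategy (pj_ is a hypothetical value of oms.table-prefix; the camelCase-to-snake_case step comes from the inherited Spring strategy):

// AppInfoDO      -> app_info          (no prefix configured)
// AppInfoDO      -> pj_app_info       (oms.table-prefix=pj_)
// InstanceInfoDO -> pj_instance_info  (oms.table-prefix=pj_)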

View File

@@ -0,0 +1,83 @@
package tech.powerjob.server.persistence.config;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.boot.autoconfigure.orm.jpa.HibernateProperties;
import org.springframework.boot.autoconfigure.orm.jpa.HibernateSettings;
import org.springframework.boot.autoconfigure.orm.jpa.JpaProperties;
import org.springframework.boot.orm.jpa.EntityManagerFactoryBuilder;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Primary;
import org.springframework.data.jpa.repository.config.EnableJpaRepositories;
import org.springframework.orm.jpa.JpaTransactionManager;
import org.springframework.orm.jpa.LocalContainerEntityManagerFactoryBean;
import org.springframework.transaction.PlatformTransactionManager;
import org.springframework.transaction.annotation.EnableTransactionManagement;
import javax.sql.DataSource;
import java.util.Map;
import java.util.Objects;
/**
* Core (remote) database JPA configuration
*
* @author tjq
* @since 2020/4/27
*/
@Configuration
@EnableTransactionManagement
@EnableJpaRepositories(
// repository base package
basePackages = RemoteJpaConfig.CORE_PACKAGES,
// entity manager factory bean name
entityManagerFactoryRef = "remoteEntityManagerFactory",
// transaction manager bean name
transactionManagerRef = "remoteTransactionManager"
)
public class RemoteJpaConfig {
public static final String CORE_PACKAGES = "tech.powerjob.server.persistence.remote";
/**
 * Generates the datasource properties, covering both the JPA and the Hibernate settings; equivalent to the following three entries:
 * spring.jpa.show-sql=false
 * spring.jpa.open-in-view=false
 * spring.jpa.hibernate.ddl-auto=update
 *
 * @return properties map
 */
private static Map<String, Object> genDatasourceProperties() {
JpaProperties jpaProperties = new JpaProperties();
jpaProperties.setOpenInView(false);
jpaProperties.setShowSql(false);
HibernateProperties hibernateProperties = new HibernateProperties();
hibernateProperties.setDdlAuto("update");
// configure the custom JPA physical (table) naming strategy
hibernateProperties.getNaming().setPhysicalStrategy(PowerJobPhysicalNamingStrategy.class.getName());
HibernateSettings hibernateSettings = new HibernateSettings();
return hibernateProperties.determineHibernateProperties(jpaProperties.getProperties(), hibernateSettings);
}
@Primary
@Bean(name = "remoteEntityManagerFactory")
public LocalContainerEntityManagerFactoryBean initRemoteEntityManagerFactory(@Qualifier("omsRemoteDatasource") DataSource omsRemoteDatasource, @Qualifier("multiDatasourceProperties") MultiDatasourceProperties properties, EntityManagerFactoryBuilder builder) {
Map<String, Object> datasourceProperties = genDatasourceProperties();
datasourceProperties.putAll(properties.getRemote().getHibernate().getProperties());
return builder
.dataSource(omsRemoteDatasource)
.properties(datasourceProperties)
.packages(CORE_PACKAGES)
.persistenceUnit("remotePersistenceUnit")
.build();
}
@Primary
@Bean(name = "remoteTransactionManager")
public PlatformTransactionManager initRemoteTransactionManager(@Qualifier("remoteEntityManagerFactory") LocalContainerEntityManagerFactoryBean localContainerEntityManagerFactoryBean) {
return new JpaTransactionManager(Objects.requireNonNull(localContainerEntityManagerFactoryBean.getObject()));
}
}

View File

@@ -0,0 +1,37 @@
package tech.powerjob.server.persistence.config.dialect;
import org.hibernate.dialect.PostgreSQL10Dialect;
import org.hibernate.type.descriptor.sql.LongVarbinaryTypeDescriptor;
import org.hibernate.type.descriptor.sql.LongVarcharTypeDescriptor;
import org.hibernate.type.descriptor.sql.SqlTypeDescriptor;
import java.sql.Types;
/**
* <a href="https://github.com/PowerJob/PowerJob/issues/750">PostgreSQL dialect</a>
* Activate it yourself via the configuration file: spring.datasource.remote.hibernate.properties.hibernate.dialect=tech.powerjob.server.persistence.config.dialect.AdpPostgreSQLDialect
*
* @author litong0531
* @since 2024/8/11
*/
public class AdpPostgreSQLDialect extends PostgreSQL10Dialect {
public AdpPostgreSQLDialect() {
super();
registerColumnType(Types.BLOB, "bytea");
registerColumnType(Types.CLOB, "text");
}
@Override
public SqlTypeDescriptor remapSqlTypeDescriptor(SqlTypeDescriptor sqlTypeDescriptor) {
switch (sqlTypeDescriptor.getSqlType()) {
case Types.CLOB:
return LongVarcharTypeDescriptor.INSTANCE;
case Types.BLOB:
return LongVarbinaryTypeDescriptor.INSTANCE;
case Types.NCLOB:
return LongVarbinaryTypeDescriptor.INSTANCE;
}
return super.remapSqlTypeDescriptor(sqlTypeDescriptor);
}
}

View File

@@ -0,0 +1,33 @@
package tech.powerjob.server.persistence.config.dialect;
import org.hibernate.dialect.PostgreSQL10Dialect;
import org.hibernate.type.descriptor.sql.LongVarcharTypeDescriptor;
import org.hibernate.type.descriptor.sql.SqlTypeDescriptor;
import java.sql.Types;
/**
* PostgreSQL support; activate it by adding the following entry to application.properties:
* spring.datasource.remote.hibernate.properties.hibernate.dialect=tech.powerjob.server.persistence.config.dialect.PowerJobPGDialect
*
* @author Kung Yao
* @author Echo009
* 2021/3/24 04:23 PM
* 1074_King
*/
public class PowerJobPGDialect extends PostgreSQL10Dialect {
/**
 * Overrides the {@link Types#CLOB} type with {@link Types#LONGVARCHAR}
 *
 * Note: if a column was created with the oid type in the PG database, this mapping cannot read the data correctly.
 * It works in PowerJob because every column marked with @Lob in PowerJob's entity classes maps to a text column in the database.
 * Also mind the database version: for anything before 10.x, provide a suitable Dialect class of your own (extend the appropriate version).
 *
 * For more context, see the issue: https://github.com/PowerJob/PowerJob/issues/153
 */
@Override
public SqlTypeDescriptor getSqlTypeDescriptorOverride(int sqlCode) {
return Types.CLOB == sqlCode ? LongVarcharTypeDescriptor.INSTANCE : null;
}
}

View File

@@ -0,0 +1,47 @@
package tech.powerjob.server.persistence.local;
import tech.powerjob.common.enums.LogLevel;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
import javax.persistence.*;
/**
* Local runtime logs
*
* @author tjq
* @since 2020/4/27
*/
@Data
@Entity
@NoArgsConstructor
@AllArgsConstructor
@Table(name = "local_instance_log", indexes = {@Index(columnList = "instanceId")})
public class LocalInstanceLogDO {
@Id
@GeneratedValue(strategy = GenerationType.IDENTITY)
private Long id;
private Long instanceId;
/**
 * log time
 */
private Long logTime;
/**
 * log level {@link LogLevel}
 */
private Integer logLevel;
/**
 * log content
 */
@Lob
@Column(columnDefinition="TEXT")
private String logContent;
/**
 * worker address
 */
private String workerAddress;
}

View File

@@ -0,0 +1,37 @@
package tech.powerjob.server.persistence.local;
import com.google.errorprone.annotations.CanIgnoreReturnValue;
import org.springframework.data.jpa.repository.JpaRepository;
import org.springframework.data.jpa.repository.Modifying;
import javax.transaction.Transactional;
import java.util.List;
import java.util.stream.Stream;
/**
* Local runtime log data access layer
*
* @author tjq
* @since 2020/4/27
*/
public interface LocalInstanceLogRepository extends JpaRepository<LocalInstanceLogDO, Long> {
/**
* Streaming query
*/
Stream<LocalInstanceLogDO> findByInstanceIdOrderByLogTime(Long instanceId);
/**
* Delete data
*/
@Modifying
@Transactional(rollbackOn = Exception.class)
long deleteByInstanceId(Long instanceId);
@Modifying
@Transactional(rollbackOn = Exception.class)
@CanIgnoreReturnValue
long deleteByInstanceIdInAndLogTimeLessThan(List<Long> instanceIds, Long t);
long countByInstanceId(Long instanceId);
}
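A hedged usage note for the streaming query (the wiring shown is hypothetical): Spring Data result streams must be consumed inside an open transaction and closed afterwards, otherwise the underlying cursor leaks.

localTransactionTemplate.execute(status -> {
    try (Stream<LocalInstanceLogDO> logs = localInstanceLogRepository.findByInstanceIdOrderByLogTime(instanceId)) {
        logs.forEach(log -> output.append(log.getLogContent()).append('\n')); // output: hypothetical StringBuilder
    }
    return null;
});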

View File

@@ -0,0 +1,90 @@
package tech.powerjob.server.persistence.monitor;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.aspectj.lang.ProceedingJoinPoint;
import org.aspectj.lang.annotation.Around;
import org.aspectj.lang.annotation.Aspect;
import org.springframework.data.domain.Slice;
import org.springframework.stereotype.Component;
import tech.powerjob.server.common.utils.AOPUtils;
import tech.powerjob.server.monitor.MonitorService;
import tech.powerjob.server.monitor.events.db.DatabaseEvent;
import tech.powerjob.server.monitor.events.db.DatabaseType;
import java.util.Collection;
import java.util.Optional;
import java.util.stream.Stream;
/**
* Database monitoring aspect
*
* @author tjq
* @since 2022/9/6
*/
@Slf4j
@Aspect
@Component
@RequiredArgsConstructor
public class DatabaseMonitorAspect {
private final MonitorService monitorService;
@Around("execution(* tech.powerjob.server.persistence.remote.repository..*.*(..))")
public Object monitorCoreDB(ProceedingJoinPoint joinPoint) throws Throwable {
return wrapperMonitor(joinPoint, DatabaseType.CORE);
}
@Around("execution(* tech.powerjob.server.persistence.local..*.*(..))")
public Object monitorLocalDB(ProceedingJoinPoint joinPoint) throws Throwable {
return wrapperMonitor(joinPoint, DatabaseType.LOCAL);
}
private Object wrapperMonitor(ProceedingJoinPoint point, DatabaseType type) throws Throwable {
String classNameMini = AOPUtils.parseRealClassName(point);
final String methodName = point.getSignature().getName();
DatabaseEvent event = new DatabaseEvent().setType(type)
.setServiceName(classNameMini)
.setMethodName(methodName)
.setStatus(DatabaseEvent.Status.SUCCESS);
long startTs = System.currentTimeMillis();
try {
final Object ret = point.proceed();
event.setRows(parseEffectRows(ret));
return ret;
} catch (Throwable t) {
event.setErrorMsg(t.getClass().getSimpleName()).setStatus(DatabaseEvent.Status.FAILED);
throw t;
} finally {
long cost = System.currentTimeMillis() - startTs;
monitorService.monitor(event.setCost(cost));
}
}
private static Integer parseEffectRows(Object ret) {
// for performance, check the most frequent cases first
if (ret instanceof Number) {
return ((Number) ret).intValue();
}
if (ret instanceof Optional) {
return ((Optional<?>) ret).isPresent() ? 1 : 0;
}
if (ret instanceof Collection) {
return ((Collection<?>) ret).size();
}
if (ret instanceof Slice) {
return ((Slice<?>) ret).getSize();
}
if (ret instanceof Stream) {
return null;
}
// TODO: change all methods that return a bare object to return Optional instead
return ret == null ? 0 : 1;
}
}

View File

@@ -0,0 +1,65 @@
package tech.powerjob.server.persistence.remote.model;
import lombok.Data;
import org.hibernate.annotations.GenericGenerator;
import javax.persistence.*;
import java.util.Date;
/**
* Application info table
*
* @author tjq
* @since 2020/3/30
*/
@Data
@Entity
@Table(uniqueConstraints = {@UniqueConstraint(name = "uidx01_app_info", columnNames = {"appName"})})
public class AppInfoDO {
@Id
@GeneratedValue(strategy = GenerationType.AUTO, generator = "native")
@GenericGenerator(name = "native", strategy = "native")
private Long id;
private String appName;
/**
 * description
 */
private String title;
/**
 * application group password
 */
private String password;
/**
 * Address (IP:Port) of the server currently in charge of scheduling jobs under this appName. Note that this is the ActorSystem address, not the HTTP address (the two use different ports).
 * After multi-language support introduced the vert.x address, this field still stores the ActorSystem address; the vert.x address is only special-cased when returned to workers.
 * Reason: many parts of the framework depend heavily on currentServer, e.g. resolving the apps that need scheduling by this address.
 */
private String currentServer;
/**
 * namespace ID (logical foreign key)
 */
private Long namespaceId;
/**
 * management tags
 */
private String tags;
/**
 * extension field
 */
private String extra;
private Date gmtCreate;
private Date gmtModified;
private Long creator;
private Long modifier;
}

View File

@@ -0,0 +1,59 @@
package tech.powerjob.server.persistence.remote.model;
import lombok.Data;
import org.hibernate.annotations.GenericGenerator;
import javax.persistence.*;
import java.util.Date;
/**
* Container (JAR container) info table
*
* @author tjq
* @since 2020/5/15
*/
@Data
@Entity
@Table(indexes = {@Index(name = "idx01_container_info", columnList = "appId")})
public class ContainerInfoDO {
@Id
@GeneratedValue(strategy = GenerationType.AUTO, generator = "native")
@GenericGenerator(name = "native", strategy = "native")
private Long id;
/**
 * owning application ID
 */
private Long appId;
private String containerName;
/**
 * container type, enum value of ContainerSourceType
 */
private Integer sourceType;
/**
 * Determined by sourceType: JarFile -> String (stored file name); Git -> JSON (URL, branch, username, password)
 */
private String sourceInfo;
/**
 * Version: JAR packages use the MD5, Git uses the commitId (32 vs 40 characters, so the two never collide)
 */
private String version;
/**
 * status, enum value of ContainerStatus
 */
private Integer status;
/**
 * last deploy time
 */
private Date lastDeployTime;
private Date gmtCreate;
private Date gmtModified;
}

View File

@@ -0,0 +1,121 @@
package tech.powerjob.server.persistence.remote.model;
import tech.powerjob.common.enums.InstanceStatus;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
import org.hibernate.annotations.GenericGenerator;
import javax.persistence.*;
import java.util.Date;
/**
* Job instance run record table
*
* @author tjq
* @since 2020/3/30
*/
@Data
@Entity
@NoArgsConstructor
@AllArgsConstructor
@Table(indexes = {
@Index(name = "idx01_instance_info", columnList = "jobId,status"),
@Index(name = "idx02_instance_info", columnList = "appId,status"),
@Index(name = "idx03_instance_info", columnList = "instanceId,status"),
@Index(name = "idx04_instance_info_outer_key", columnList = "outerKey")
})
public class InstanceInfoDO {
@Id
@GeneratedValue(strategy = GenerationType.AUTO, generator = "native")
@GenericGenerator(name = "native", strategy = "native")
private Long id;
/**
 * job ID
 */
private Long jobId;
/**
 * ID of the owning application (denormalized to speed up queries)
 */
private Long appId;
/**
 * instance ID (globally unique)
 */
private Long instanceId;
/**
 * job params (static)
 *
 * @since 2021/2/01
 */
@Lob
@Column
private String jobParams;
/**
 * instance params (dynamic)
 */
@Lob
@Column
private String instanceParams;
/**
 * type of this instance: normal / workflow (InstanceType)
 */
private Integer type;
/**
 * ID of the workflow this instance belongs to (only present for workflow jobs)
 */
private Long wfInstanceId;
/**
 * instance status {@link InstanceStatus}
 */
private Integer status;
/**
 * execution result (slightly larger payloads are allowed)
 */
@Lob
@Column
private String result;
/**
 * expected trigger time
 */
private Long expectedTriggerTime;
/**
 * actual trigger time
 */
private Long actualTriggerTime;
/**
 * finish time
 */
private Long finishedTime;
/**
 * last report time
 */
private Long lastReportTime;
/**
 * TaskTracker address
 */
private String taskTrackerAddress;
/**
 * total number of runs (used for retry decisions)
 */
private Long runningTimes;
/**
 * "foreign key" used in OpenAPI scenarios to bind business records to PowerJob instances
 */
private String outerKey;
/**
 * extension field, used in OpenAPI scenarios to pass context parameters through
 */
private String extendValue;
/**
 * scheduling metadata
 */
private String meta;
private Date gmtCreate;
private Date gmtModified;
}

View File

@@ -0,0 +1,172 @@
package tech.powerjob.server.persistence.remote.model;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
import org.hibernate.annotations.GenericGenerator;
import javax.persistence.*;
import java.util.Date;
/**
* Job info table
*
* @author tjq
* @since 2020/3/29
*/
@Data
@Entity
@NoArgsConstructor
@AllArgsConstructor
@Table(indexes = {
@Index(name = "idx01_job_info", columnList = "appId,status,timeExpressionType,nextTriggerTime"),
})
public class JobInfoDO {
@Id
@GeneratedValue(strategy = GenerationType.AUTO, generator = "native")
@GenericGenerator(name = "native", strategy = "native")
private Long id;
/* ************************** basic job info ************************** */
/**
 * job name
 */
private String jobName;
/**
 * job description
 */
private String jobDescription;
/**
 * ID of the application the job belongs to
 */
private Long appId;
/**
 * job-level parameters
 */
@Lob
@Column
private String jobParams;
/* ************************** scheduling parameters ************************** */
/**
 * time expression type: CRON / API / FIX_RATE / FIX_DELAY
 */
private Integer timeExpressionType;
/**
 * time expression: CRON / NULL / LONG / LONG
 */
private String timeExpression;
/* ************************** execution settings ************************** */
/**
 * execute type: standalone / broadcast / MapReduce
 */
private Integer executeType;
/**
 * processor type: Java / Shell
 */
private Integer processorType;
/**
 * processor info
 */
private String processorInfo;
/* ************************** runtime configuration ************************** */
/**
 * max number of concurrently running instances, default 1
 */
private Integer maxInstanceNum;
/**
 * concurrency: max number of threads executing the task at the same time
 */
private Integer concurrency;
/**
 * overall time limit of an instance
 */
private Long instanceTimeLimit;
/* ************************** retry configuration ************************** */
private Integer instanceRetryNum;
private Integer taskRetryNum;
/**
 * 1 running normally, 2 stopped (no longer scheduled)
 */
private Integer status;
/**
 * next trigger time
 */
private Long nextTriggerTime;
/* ************************** busy-machine thresholds ************************** */
/**
 * minimum number of CPU cores; 0 means unlimited
 */
private double minCpuCores;
/**
 * minimum free memory in GB; 0 means unlimited
 */
private double minMemorySpace;
/**
 * minimum free disk space in GB; 0 means unlimited
 */
private double minDiskSpace;
/* ************************** cluster configuration ************************** */
/**
 * designated workers; empty means unrestricted, otherwise only the listed machines run the job (comma-separated)
 */
private String designatedWorkers;
/**
 * max number of workers
 */
private Integer maxWorkerCount;
/**
 * alarm user ID list (comma-separated)
 */
private String notifyUserIds;
private Date gmtCreate;
private Date gmtModified;
/**
 * extension parameters; PowerJob itself never touches this data, it is reserved for developers,
 * e.g. custom worker-filtering logic in WorkerFilter could read a metric such as GpuUsage < 10 from here
 */
private String extra;
/**
 * dispatch strategy
 */
private Integer dispatchStrategy;
/**
 * concrete configuration behind a dispatch strategy; the value depends on dispatchStrategy
 */
private String dispatchStrategyConfig;
private String lifecycle;
/**
 * alarm configuration
 */
private String alarmConfig;
/**
 * job tag, freely customizable by integrators
 */
private String tag;
/**
 * log configuration: log level, log target, etc.
 */
private String logConfig;
/**
 * Advanced runtime configuration.
 * Advanced parameters that need no index are stored here from now on, easing version upgrades (keeps the table schema as stable as possible)
 */
private String advancedRuntimeConfig;
}

View File

@@ -0,0 +1,65 @@
package tech.powerjob.server.persistence.remote.model;
import lombok.Data;
import org.hibernate.annotations.GenericGenerator;
import javax.persistence.*;
import java.util.Date;
/**
* Namespace, used to organize and manage apps
*
* @author tjq
* @since 2023/9/3
*/
@Data
@Entity
@Table(uniqueConstraints = {@UniqueConstraint(name = "uidx01_namespace", columnNames = {"code"})})
public class NamespaceDO {
@Id
@GeneratedValue(strategy = GenerationType.AUTO, generator = "native")
@GenericGenerator(name = "native", strategy = "native")
private Long id;
/**
 * unique code of the namespace
 */
private String code;
/**
 * namespace display name, e.g. a human-readable description like "XX department, XX space"
 */
private String name;
/**
 * auth token, needed for subsequent OpenAPI calls
 */
private String token;
private Integer status;
/**
 * Department, an org-structure-related attribute.
 * Reserved database column to ease customization based on the org structure
 */
private String dept;
/**
 * tags, the king of extensibility; comma-separated
 */
private String tags;
/**
 * extension field
 */
private String extra;
private Date gmtCreate;
private Date gmtModified;
private Long creator;
private Long modifier;
}

View File

@@ -0,0 +1,46 @@
package tech.powerjob.server.persistence.remote.model;
import lombok.Data;
import lombok.NoArgsConstructor;
import org.hibernate.annotations.GenericGenerator;
import javax.persistence.*;
import java.util.Date;
/**
* Database lock
*
* @author tjq
* @since 2020/4/2
*/
@Data
@Entity
@NoArgsConstructor
@Table(uniqueConstraints = {@UniqueConstraint(name = "uidx01_oms_lock", columnNames = {"lockName"})})
public class OmsLockDO {
@Id
@GeneratedValue(strategy = GenerationType.AUTO, generator = "native")
@GenericGenerator(name = "native", strategy = "native")
private Long id;
private String lockName;
private String ownerIP;
/**
* maximum lock hold time
*/
private Long maxLockTime;
private Date gmtCreate;
private Date gmtModified;
public OmsLockDO(String lockName, String ownerIP, Long maxLockTime) {
this.lockName = lockName;
this.ownerIP = ownerIP;
this.maxLockTime = maxLockTime;
this.gmtCreate = new Date();
this.gmtModified = this.gmtCreate;
}
}

View File

@@ -0,0 +1,36 @@
package tech.powerjob.server.persistence.remote.model;
import lombok.Data;
import org.hibernate.annotations.GenericGenerator;
import javax.persistence.*;
import java.util.Date;
/**
* User table of PowerJob's built-in login system; only stores users who sign in with PowerJob's native authentication
*
* @author tjq
* @since 2024/2/13
*/
@Data
@Entity
@Table(uniqueConstraints = {
@UniqueConstraint(name = "uidx01_username", columnNames = {"username"})
})
public class PwjbUserInfoDO {
@Id
@GeneratedValue(strategy = GenerationType.AUTO, generator = "native")
@GenericGenerator(name = "native", strategy = "native")
private Long id;
private String username;
private String password;
private String extra;
private Date gmtCreate;
private Date gmtModified;
}

View File

@@ -0,0 +1,43 @@
package tech.powerjob.server.persistence.remote.model;
import lombok.Data;
import lombok.NoArgsConstructor;
import org.hibernate.annotations.GenericGenerator;
import javax.persistence.*;
import java.util.Date;
/**
* Server info table, used to assign unique server IDs
*
* @author tjq
* @since 2020/4/15
*/
@Data
@Entity
@NoArgsConstructor
@Table(
uniqueConstraints = {@UniqueConstraint(name = "uidx01_server_info", columnNames = "ip")},
indexes = {@Index(name = "idx01_server_info", columnList = "gmtModified")}
)
public class ServerInfoDO {
@Id
@GeneratedValue(strategy = GenerationType.AUTO, generator = "native")
@GenericGenerator(name = "native", strategy = "native")
private Long id;
/**
* server IP address
*/
private String ip;
private Date gmtCreate;
private Date gmtModified;
public ServerInfoDO(String ip) {
this.ip = ip;
this.gmtCreate = new Date();
this.gmtModified = this.gmtCreate;
}
}

View File

@@ -0,0 +1,49 @@
package tech.powerjob.server.persistence.remote.model;
import lombok.Data;
import lombok.NoArgsConstructor;
import org.hibernate.annotations.GenericGenerator;
import javax.persistence.*;
import java.util.Date;
/**
* Sundries
* A K-K-V table that stores miscellaneous configuration data
*
* @author tjq
* @since 2024/2/15
*/
@Data
@Entity
@NoArgsConstructor
@Table(uniqueConstraints = {@UniqueConstraint(name = "uidx01_sundry", columnNames = {"pkey", "skey"})})
public class SundryDO {
@Id
@GeneratedValue(strategy = GenerationType.AUTO, generator = "native")
@GenericGenerator(name = "native", strategy = "native")
private Long id;
/**
* PKEY
*/
private String pkey;
/**
* SKEY
*/
private String skey;
/**
* content
*/
private String content;
/**
* other parameters
*/
private String extra;
private Date gmtCreate;
private Date gmtModified;
}

View File

@ -0,0 +1,82 @@
package tech.powerjob.server.persistence.remote.model;
import lombok.Data;
import org.hibernate.annotations.GenericGenerator;
import javax.persistence.*;
import java.util.Date;
/**
* User info table
* Holds all user-system data maintained by PowerJob itself
* Potentially breaking change in 5.0.0: to support third-party login, username is matched against the third-party login system, so the column must be declared as a unique index to guarantee global uniqueness
*
* @author tjq
* @since 2020/4/12
*/
@Data
@Entity
@Table(uniqueConstraints = {
@UniqueConstraint(name = "uidx01_user_name", columnNames = {"username"})
},
indexes = {
@Index(name = "uidx02_user_info", columnList = "email")
})
public class UserInfoDO {
@Id
@GeneratedValue(strategy = GenerationType.AUTO, generator = "native")
@GenericGenerator(name = "native", strategy = "native")
private Long id;
/**
 * account type
 */
private String accountType;
private String username;
/**
 * since 5.0.0
 * nickname (usernames from third-party logins are hard to recognize; nick was introduced for friendlier display)
 */
private String nick;
private String password;
/**
 * phone number
 */
private String phone;
/**
 * email address
 */
private String email;
/**
 * webHook
 */
private String webHook;
/**
 * secondary verification info for JWT login
 */
private String tokenLoginVerifyInfo;
/**
 * extension field for third parties
 * must not be used inside PowerJob itself
 */
private String extra;
/**
 * original account username
 */
private String originUsername;
/**
 * current account status
 */
private Integer status;
private Date gmtCreate;
private Date gmtModified;
}

View File

@@ -0,0 +1,53 @@
package tech.powerjob.server.persistence.remote.model;
import lombok.Data;
import org.hibernate.annotations.GenericGenerator;
import javax.persistence.*;
import java.util.Date;
/**
* User role table
*
* @author tjq
* @since 2023/3/20
*/
@Data
@Entity
@Table(indexes = {
@Index(name = "uidx01_user_id", columnList = "userId")
})
public class UserRoleDO {
@Id
@GeneratedValue(strategy = GenerationType.AUTO, generator = "native")
@GenericGenerator(name = "native", strategy = "native")
private Long id;
/**
 * ID of the user the role is granted to
 */
private Long userId;
/**
 * permission scope: namespace or app
 */
private Integer scope;
/**
 * together with scope, identifies the grant target, e.g. a specific app or a specific namespace
 */
private Long target;
/**
 * role, e.g. Observer
 */
private Integer role;
/**
 * extension field
 */
private String extra;
private Date gmtCreate;
private Date gmtModified;
}

View File

@@ -0,0 +1,82 @@
package tech.powerjob.server.persistence.remote.model;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
import org.hibernate.annotations.GenericGenerator;
import javax.persistence.*;
import java.util.Date;
/**
* DAG workflow info table
*
* @author tjq
* @since 2020/5/26
*/
@Data
@Entity
@NoArgsConstructor
@AllArgsConstructor
@Table(indexes = {
@Index(name = "idx01_workflow_info", columnList = "appId,status,timeExpressionType,nextTriggerTime")
})
public class WorkflowInfoDO {
@Id
@GeneratedValue(strategy = GenerationType.AUTO, generator = "native")
@GenericGenerator(name = "native", strategy = "native")
private Long id;
private String wfName;
private String wfDescription;
/**
 * owning application ID
 */
private Long appId;
/**
 * DAG graph of the workflow (point-and-edge style DAG as JSON)
 */
@Lob
@Column
private String peDAG;
/* ************************** scheduling parameters ************************** */
/**
 * time expression type: CRON / API / FIX_RATE / FIX_DELAY
 */
private Integer timeExpressionType;
/**
 * time expression: CRON / NULL / LONG / LONG
 */
private String timeExpression;
/**
 * max number of concurrently running workflow instances, default 1
 */
private Integer maxWfInstanceNum;
/**
 * 1 running normally, 2 stopped (no longer scheduled)
 */
private Integer status;
/**
 * next trigger time
 */
private Long nextTriggerTime;
/**
 * alarm recipients when the workflow as a whole fails
 */
private String notifyUserIds;
private Date gmtCreate;
private Date gmtModified;
private String extra;
private String lifecycle;
}

View File

@@ -0,0 +1,88 @@
package tech.powerjob.server.persistence.remote.model;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
import org.hibernate.annotations.GenericGenerator;
import javax.persistence.*;
import java.util.Date;
/**
* Workflow instance table
*
* @author tjq
* @since 2020/5/26
*/
@Data
@Entity
@NoArgsConstructor
@AllArgsConstructor
@Table(
uniqueConstraints = {@UniqueConstraint(name = "uidx01_wf_instance", columnNames = {"wfInstanceId"})},
indexes = {
@Index(name = "idx01_wf_instance", columnList = "workflowId,status"),
@Index(name = "idx01_wf_instance", columnList = "appId,status,expectedTriggerTime")
}
)
public class WorkflowInstanceInfoDO {
@Id
@GeneratedValue(strategy = GenerationType.AUTO, generator = "native")
@GenericGenerator(name = "native", strategy = "native")
private Long id;
/**
 * ID of the owning application (denormalized to speed up queries)
 */
private Long appId;
/**
 * workflowInstanceId (instance tables all use a dedicated ID as the business key, to support potential table sharding)
 */
private Long wfInstanceId;
/**
 * parent workflow instance ID (supports nested workflows)
 */
private Long parentWfInstanceId;
private Long workflowId;
/**
 * workflow status (WorkflowInstanceStatus)
 */
private Integer status;
/**
 * workflow init params
 */
@Lob
@Column
private String wfInitParams;
/**
 * workflow context
 */
@Lob
@Column
private String wfContext;
@Lob
@Column
private String dag;
@Lob
@Column
private String result;
/**
 * expected trigger time
 */
private Long expectedTriggerTime;
/**
 * actual trigger time
 */
private Long actualTriggerTime;
/**
 * finish time
 */
private Long finishedTime;
private Date gmtCreate;
private Date gmtModified;
}

View File

@@ -0,0 +1,82 @@
package tech.powerjob.server.persistence.remote.model;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
import org.hibernate.annotations.GenericGenerator;
import tech.powerjob.common.enums.WorkflowNodeType;
import javax.persistence.*;
import java.util.Date;
/**
* Workflow node info
* Records the per-node custom configuration of task nodes within a workflow
*
* @author Echo009
* @since 2021/1/23
*/
@Data
@Entity
@NoArgsConstructor
@AllArgsConstructor
@Table(indexes = {
@Index(name = "idx01_workflow_node_info", columnList = "workflowId,gmtCreate")
})
public class WorkflowNodeInfoDO {
@Id
@GeneratedValue(strategy = GenerationType.AUTO, generator = "native")
@GenericGenerator(name = "native", strategy = "native")
private Long id;
@Column(nullable = false)
private Long appId;
@Column
private Long workflowId;
/**
 * node type {@link WorkflowNodeType}
 */
private Integer type;
/**
 * job ID
 * for nested-workflow nodes, this stores the workflow ID instead
 */
private Long jobId;
/**
 * node name, defaults to the name of the corresponding job
 */
private String nodeName;
/**
 * node params
 */
@Lob
private String nodeParams;
/**
 * whether the node is enabled
 */
@Column(nullable = false)
private Boolean enable;
/**
 * whether the node may be skipped when it fails
 */
@Column(nullable = false)
private Boolean skipWhenFailed;
@Lob
private String extra;
/**
 * create time
 */
@Column(nullable = false)
private Date gmtCreate;
/**
 * update time
 */
@Column(nullable = false)
private Date gmtModified;
}

View File

@@ -0,0 +1,45 @@
package tech.powerjob.server.persistence.remote.model.brief;
import lombok.Data;
/**
* @author Echo009
* @since 2022/9/13
*/
@Data
public class BriefInstanceInfo {
private Long appId;
private Long id;
/**
 * job ID
 */
private Long jobId;
/**
 * instance ID
 */
private Long instanceId;
/**
 * total number of runs (used for retry decisions)
 */
private Long runningTimes;
public BriefInstanceInfo(Long appId, Long id, Long jobId, Long instanceId) {
this.appId = appId;
this.id = id;
this.jobId = jobId;
this.instanceId = instanceId;
}
public BriefInstanceInfo(Long appId, Long id, Long jobId, Long instanceId, Long runningTimes) {
this.appId = appId;
this.id = id;
this.jobId = jobId;
this.instanceId = instanceId;
this.runningTimes = runningTimes;
}
}

View File

@@ -0,0 +1,8 @@
/**
* Relational database persistence layer
* Take care to stay database-agnostic, e.g. never use vendor-specific time functions (MySQL: now(), etc.)
*
* @author tjq
* @since 2020/6/3
*/
package tech.powerjob.server.persistence.remote;

View File

@@ -0,0 +1,40 @@
package tech.powerjob.server.persistence.remote.repository;
import org.springframework.data.domain.Page;
import org.springframework.data.domain.Pageable;
import org.springframework.data.jpa.repository.JpaRepository;
import org.springframework.data.jpa.repository.JpaSpecificationExecutor;
import org.springframework.data.jpa.repository.Query;
import org.springframework.data.repository.query.Param;
import tech.powerjob.server.persistence.remote.model.AppInfoDO;
import java.util.Collection;
import java.util.List;
import java.util.Optional;
/**
* AppInfo data access layer
*
* @author tjq
* @since 2020/4/1
*/
public interface AppInfoRepository extends JpaRepository<AppInfoDO, Long>, JpaSpecificationExecutor<AppInfoDO> {
Optional<AppInfoDO> findByAppName(String appName);
Page<AppInfoDO> findByAppNameLike(String condition, Pageable pageable);
/**
 * Query appIds by currentServer.
 * Only the id is actually needed; for performance, a dedicated query returning only the IDs is provided.
 */
List<AppInfoDO> findAllByCurrentServer(String currentServer);
@Query(value = "select id from AppInfoDO where currentServer = :currentServer")
List<Long> listAppIdByCurrentServer(@Param("currentServer") String currentServer);
List<AppInfoDO> findAllByNamespaceId(Long namespaceId);
List<AppInfoDO> findAllByIdIn(Collection<Long> ids);
}

View File

@@ -0,0 +1,17 @@
package tech.powerjob.server.persistence.remote.repository;
import tech.powerjob.server.persistence.remote.model.ContainerInfoDO;
import org.springframework.data.jpa.repository.JpaRepository;
import java.util.List;
/**
* Container info data access layer
*
* @author tjq
* @since 2020/5/15
*/
public interface ContainerInfoRepository extends JpaRepository<ContainerInfoDO, Long> {
List<ContainerInfoDO> findByAppIdAndStatusNot(Long appId, Integer status);
}

View File

@@ -0,0 +1,147 @@
package tech.powerjob.server.persistence.remote.repository;
import com.google.errorprone.annotations.CanIgnoreReturnValue;
import org.springframework.data.domain.Pageable;
import org.springframework.data.jpa.repository.JpaRepository;
import org.springframework.data.jpa.repository.JpaSpecificationExecutor;
import org.springframework.data.jpa.repository.Modifying;
import org.springframework.data.jpa.repository.Query;
import org.springframework.data.repository.query.Param;
import tech.powerjob.server.persistence.remote.model.InstanceInfoDO;
import tech.powerjob.server.persistence.remote.model.brief.BriefInstanceInfo;
import javax.transaction.Transactional;
import java.util.Date;
import java.util.List;
/**
 * InstanceInfo (job run record) data access layer
 *
 * @author tjq
 * @since 2020/4/1
 */
public interface InstanceInfoRepository extends JpaRepository<InstanceInfoDO, Long>, JpaSpecificationExecutor<InstanceInfoDO> {
/**
 * Count how many instances of the given job are currently running
 */
@Query(value = "select count(*) from InstanceInfoDO where jobId = ?1 and status in ?2")
long countByJobIdAndStatusIn(long jobId, List<Integer> status);
List<InstanceInfoDO> findByJobIdAndStatusIn(long jobId, List<Integer> status);
/**
 * Update status-change info
 *
 * @param lastReportTime last report time
 * @param modifyTime update time
 * @param runningTimes run count
 * @param instanceId instance ID
 * @param status target status
 * @param oldStatus previous status
 * @return number of rows updated
 */
@Transactional(rollbackOn = Exception.class)
@Modifying
@Query(value = "update InstanceInfoDO set lastReportTime = :lastReportTime, gmtModified = :modifyTime, runningTimes = :runningTimes, status = :status where instanceId = :instanceId and status = :oldStatus")
int updateStatusChangeInfoByInstanceIdAndStatus(@Param("lastReportTime") long lastReportTime, @Param("modifyTime") Date modifyTime, @Param("runningTimes") long runningTimes, @Param("status") int status, @Param("instanceId") long instanceId, @Param("oldStatus") int oldStatus);
/**
 * Update the run record (dedicated to DispatchService)
 *
 * @param instanceId instance ID
 * @param status status
 * @param actualTriggerTime actual trigger time
 * @param finishedTime finish time
 * @param taskTrackerAddress TaskTracker address
 * @param result result
 * @param modifyTime update time
 * @return number of rows updated
 */
@Transactional(rollbackOn = Exception.class)
@Modifying
@CanIgnoreReturnValue
@Query(value = "update InstanceInfoDO set status = :status, actualTriggerTime = :actualTriggerTime, finishedTime = :finishedTime, taskTrackerAddress = :taskTrackerAddress, result = :result, gmtModified = :modifyTime where instanceId = :instanceId")
int update4TriggerFailed(@Param("instanceId") long instanceId, @Param("status") int status, @Param("actualTriggerTime") long actualTriggerTime, @Param("finishedTime") long finishedTime, @Param("taskTrackerAddress") String taskTrackerAddress, @Param("result") String result, @Param("modifyTime") Date modifyTime);
/**
 * Update the run record (dedicated to DispatchService)
 *
 * @param instanceId instance ID (globally unique)
 * @param status status
 * @param actualTriggerTime actual trigger time
 * @param taskTrackerAddress TaskTracker address
 * @param modifyTime update time
 * @param oldStatus previous status
 * @return number of rows updated
 */
@Transactional(rollbackOn = Exception.class)
@Modifying
@CanIgnoreReturnValue
@Query(value = "update InstanceInfoDO set status = :status, actualTriggerTime = :actualTriggerTime, taskTrackerAddress = :taskTrackerAddress, gmtModified = :modifyTime where instanceId = :instanceId and status = :oldStatus")
int update4TriggerSucceed(@Param("instanceId") long instanceId, @Param("status") int status, @Param("actualTriggerTime") long actualTriggerTime, @Param("taskTrackerAddress") String taskTrackerAddress, @Param("modifyTime") Date modifyTime, @Param("oldStatus") int oldStatus);
@Transactional(rollbackOn = Exception.class)
@Modifying
@CanIgnoreReturnValue
@Query(value = "update InstanceInfoDO set status = :status, gmtModified = :modifyTime where instanceId = :instanceId and status = :originStatus ")
int updateStatusAndGmtModifiedByInstanceIdAndOriginStatus(@Param("instanceId") long instanceId, @Param("originStatus") int originStatus, @Param("status") int status, @Param("modifyTime") Date modifyTime);
@Transactional(rollbackOn = Exception.class)
@Modifying
@CanIgnoreReturnValue
@Query(value = "update InstanceInfoDO set status = :status, gmtModified = :modifyTime where instanceId in (:instanceIdList) and status = :originStatus ")
int updateStatusAndGmtModifiedByInstanceIdListAndOriginStatus(@Param("instanceIdList") List<Long> instanceIdList, @Param("originStatus") int originStatus, @Param("status") int status, @Param("modifyTime") Date modifyTime);
/**
 * Update the run record of fixed-frequency jobs
 *
 * @param instanceId instance ID (globally unique)
 * @param status status
 * @param runningTimes run count
 * @param modifyTime update time
 * @return number of rows updated
 */
@Modifying
@Transactional(rollbackOn = Exception.class)
@CanIgnoreReturnValue
@Query(value = "update InstanceInfoDO set status = :status, runningTimes = :runningTimes, gmtModified = :modifyTime where instanceId = :instanceId")
int update4FrequentJob(@Param("instanceId") long instanceId, @Param("status") int status, @Param("runningTimes") long runningTimes, @Param("modifyTime") Date modifyTime);
List<InstanceInfoDO> findAllByAppIdInAndStatusAndExpectedTriggerTimeLessThan(@Param("appIds") List<Long> appIds, @Param("status") int status, @Param("time") long time, Pageable pageable);
@Query(value = "select new tech.powerjob.server.persistence.remote.model.brief.BriefInstanceInfo(i.appId,i.id,i.jobId,i.instanceId) from InstanceInfoDO i where i.appId in (:appIds) and i.status = :status and i.actualTriggerTime < :time")
List<BriefInstanceInfo> selectBriefInfoByAppIdInAndStatusAndActualTriggerTimeLessThan(@Param("appIds") List<Long> appIds, @Param("status") int status, @Param("time") long time, Pageable pageable);
@Query(value = "select new tech.powerjob.server.persistence.remote.model.brief.BriefInstanceInfo(i.appId,i.id,i.jobId,i.instanceId,i.runningTimes) from InstanceInfoDO i where i.appId in (:appIds) and i.status = :status and i.gmtModified < :time")
List<BriefInstanceInfo> selectBriefInfoByAppIdInAndStatusAndGmtModifiedBefore(@Param("appIds") List<Long> appIds, @Param("status") int status, @Param("time") Date time, Pageable pageable);
InstanceInfoDO findByInstanceId(long instanceId);
/* -- statistics -- */
@Query(value = "select count(*) from InstanceInfoDO where appId = ?1 and status = ?2")
long countByAppIdAndStatus(long appId, int status);
long countByAppIdAndStatusAndGmtCreateAfter(long appId, int status, Date time);
@Query(value = "select distinct jobId from InstanceInfoDO where jobId in ?1 and status in ?2")
List<Long> findByJobIdInAndStatusIn(List<Long> jobIds, List<Integer> status);
/**
 * Delete historical data. JPA's built-in delete loops over IDs one by one; deleting 2000 rows took several seconds, which is far too slow...
 * The result can only be received as an int.
 *
 * @param time update-time threshold
 * @param status statuses
 * @return number of rows deleted
 */
@Modifying
@Transactional(rollbackOn = Exception.class)
@Query(value = "delete from InstanceInfoDO where gmtModified < ?1 and status in ?2")
int deleteAllByGmtModifiedBeforeAndStatusIn(Date time, List<Integer> status);
}
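A hedged sketch of the compare-and-set pattern these conditional updates enable (it assumes InstanceStatus exposes its int code via getV()): the oldStatus guard makes concurrent transitions race-safe, because only one server's update can match.

int updated = instanceInfoRepository.updateStatusAndGmtModifiedByInstanceIdAndOriginStatus(
        instanceId,
        InstanceStatus.WAITING_WORKER_RECEIVE.getV(), // expected current status
        InstanceStatus.CANCELED.getV(),               // target status
        new Date());
if (updated == 0) {
    // another server transitioned the instance first; back off
}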

View File

@@ -0,0 +1,50 @@
package tech.powerjob.server.persistence.remote.repository;
import tech.powerjob.server.persistence.remote.model.JobInfoDO;
import org.springframework.data.domain.Page;
import org.springframework.data.domain.Pageable;
import org.springframework.data.jpa.repository.JpaRepository;
import org.springframework.data.jpa.repository.JpaSpecificationExecutor;
import org.springframework.data.jpa.repository.Query;
import java.util.Collection;
import java.util.List;
import java.util.Set;
/**
* JobInfo data access layer
*
* @author tjq
* @since 2020/4/1
*/
public interface JobInfoRepository extends JpaRepository<JobInfoDO, Long>, JpaSpecificationExecutor<JobInfoDO> {
/**
 * Dedicated to scheduling
 */
List<JobInfoDO> findByAppIdInAndStatusAndTimeExpressionTypeAndNextTriggerTimeLessThanEqual(List<Long> appIds, int status, int timeExpressionType, long time);
@Query(value = "select id from JobInfoDO where appId in ?1 and status = ?2 and timeExpressionType in ?3")
List<Long> findByAppIdInAndStatusAndTimeExpressionTypeIn(List<Long> appIds, int status, List<Integer> timeTypes);
Page<JobInfoDO> findByAppIdAndStatusNot(Long appId, int status, Pageable pageable);
Page<JobInfoDO> findByAppIdAndJobNameLikeAndStatusNot(Long appId, String condition, int status, Pageable pageable);
/**
 * Validate the jobs contained in a workflow
 * @param appId app ID
 * @param statusSet status set
 * @param jobIds job IDs
 * @return count
 */
long countByAppIdAndStatusInAndIdIn(Long appId, Set<Integer> statusSet, Set<Long> jobIds);
long countByAppIdAndStatusNot(long appId, int status);
List<JobInfoDO> findByAppId(Long appId);
List<JobInfoDO> findByIdIn(Collection<Long> jobIds);
}

View File

@@ -0,0 +1,22 @@
package tech.powerjob.server.persistence.remote.repository;
import org.springframework.data.jpa.repository.JpaRepository;
import org.springframework.data.jpa.repository.JpaSpecificationExecutor;
import tech.powerjob.server.persistence.remote.model.NamespaceDO;
import java.util.Collection;
import java.util.List;
import java.util.Optional;
/**
* Namespace data access layer
*
* @author tjq
* @since 2023/9/3
*/
public interface NamespaceRepository extends JpaRepository<NamespaceDO, Long>, JpaSpecificationExecutor<NamespaceDO> {
Optional<NamespaceDO> findByCode(String code);
List<NamespaceDO> findAllByIdIn(Collection<Long> ids);
}

View File

@@ -0,0 +1,28 @@
package tech.powerjob.server.persistence.remote.repository;
import tech.powerjob.server.persistence.remote.model.OmsLockDO;
import org.springframework.data.jpa.repository.JpaRepository;
import org.springframework.data.jpa.repository.Modifying;
import org.springframework.data.jpa.repository.Query;
import javax.transaction.Transactional;
/**
* Uses a uniqueness constraint as a database lock
*
* @author tjq
* @since 2020/4/2
*/
public interface OmsLockRepository extends JpaRepository<OmsLockDO, Long> {
@Modifying
@Transactional(rollbackOn = Exception.class)
@Query(value = "delete from OmsLockDO where lockName = ?1")
int deleteByLockName(String lockName);
OmsLockDO findByLockName(String lockName);
@Modifying
@Transactional(rollbackOn = Exception.class)
int deleteByOwnerIP(String ip);
}
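A hedged sketch of the lock protocol this repository enables (the lock name and the calling code are hypothetical): the unique index on lockName turns the INSERT into an atomic acquire.

OmsLockDO lock = new OmsLockDO("cleanup_lock", serverIp, 60000L);
try {
    omsLockRepository.saveAndFlush(lock); // unique-index violation -> another server holds the lock
} catch (DataIntegrityViolationException e) {
    return;
}
try {
    // ... critical section ...
} finally {
    omsLockRepository.deleteByLockName("cleanup_lock");
}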

View File

@@ -0,0 +1,17 @@
package tech.powerjob.server.persistence.remote.repository;
import org.springframework.data.jpa.repository.JpaRepository;
import tech.powerjob.server.persistence.remote.model.PwjbUserInfoDO;
import java.util.Optional;
/**
* PwjbUserInfoRepository
*
* @author tjq
* @since 2024/2/13
*/
public interface PwjbUserInfoRepository extends JpaRepository<PwjbUserInfoDO, Long> {
Optional<PwjbUserInfoDO> findByUsername(String username);
}

View File

@@ -0,0 +1,39 @@
package tech.powerjob.server.persistence.remote.repository;
import tech.powerjob.server.persistence.remote.model.ServerInfoDO;
import com.google.errorprone.annotations.CanIgnoreReturnValue;
import org.springframework.data.jpa.repository.JpaRepository;
import org.springframework.data.jpa.repository.Modifying;
import org.springframework.data.jpa.repository.Query;
import org.springframework.data.repository.query.Param;
import javax.transaction.Transactional;
import java.util.Date;
/**
* Server info data access layer
*
* @author tjq
* @since 2020/4/15
*/
public interface ServerInfoRepository extends JpaRepository<ServerInfoDO, Long> {
ServerInfoDO findByIp(String ip);
@Transactional(rollbackOn = Exception.class)
@Modifying
@CanIgnoreReturnValue
@Query(value = "update ServerInfoDO set gmtModified = :gmtModified where ip = :ip")
int updateGmtModifiedByIp(@Param("ip") String ip, @Param("gmtModified") Date gmtModified);
@Transactional(rollbackOn = Exception.class)
@Modifying
@CanIgnoreReturnValue
@Query(value = "update ServerInfoDO set id = :id where ip = :ip")
int updateIdByIp(@Param("id") long id, @Param("ip") String ip);
@Transactional(rollbackOn = Exception.class)
@Modifying
@Query(value = "delete from ServerInfoDO where gmtModified < ?1")
int deleteByGmtModifiedBefore(Date threshold);
}

View File

@@ -0,0 +1,20 @@
package tech.powerjob.server.persistence.remote.repository;
import org.springframework.data.jpa.repository.JpaRepository;
import tech.powerjob.server.persistence.remote.model.SundryDO;
import java.util.List;
import java.util.Optional;
/**
* SundryRepository
*
* @author tjq
* @since 2024/2/15
*/
public interface SundryRepository extends JpaRepository<SundryDO, Long> {
List<SundryDO> findAllByPkey(String pkey);
Optional<SundryDO> findByPkeyAndSkey(String pkey, String skey);
}

View File

@@ -0,0 +1,23 @@
package tech.powerjob.server.persistence.remote.repository;
import org.springframework.data.jpa.repository.JpaRepository;
import org.springframework.data.jpa.repository.JpaSpecificationExecutor;
import tech.powerjob.server.persistence.remote.model.UserInfoDO;
import java.util.List;
import java.util.Optional;
/**
* User info data access layer
*
* @author tjq
* @since 2020/4/12
*/
public interface UserInfoRepository extends JpaRepository<UserInfoDO, Long>, JpaSpecificationExecutor<UserInfoDO> {
Optional<UserInfoDO> findByUsername(String username);
List<UserInfoDO> findByUsernameLike(String username);
List<UserInfoDO> findByIdIn(List<Long> userIds);
}

View File

@@ -0,0 +1,23 @@
package tech.powerjob.server.persistence.remote.repository;
import org.springframework.data.jpa.repository.JpaRepository;
import tech.powerjob.server.persistence.remote.model.UserRoleDO;
import java.util.List;
/**
* UserRoleRepository
*
* @author tjq
* @since 2023/3/20
*/
public interface UserRoleRepository extends JpaRepository<UserRoleDO, Long> {
List<UserRoleDO> findAllByUserId(Long userId);
List<UserRoleDO> findAllByScopeAndTarget(Integer scope, Long target);
List<UserRoleDO> findAllByScopeAndTargetAndRoleAndUserId(Integer scope, Long target, Integer role, Long userId);
List<UserRoleDO> findAllByUserIdAndScope(Long userId, Integer scope);
}

View File

@ -0,0 +1,37 @@
package tech.powerjob.server.persistence.remote.repository;
import tech.powerjob.server.persistence.remote.model.WorkflowInfoDO;
import org.springframework.data.domain.Page;
import org.springframework.data.domain.Pageable;
import org.springframework.data.jpa.repository.JpaRepository;
import java.util.List;
/**
* DAG workflow data access layer
*
* @author tjq
* @since 2020/5/26
*/
public interface WorkflowInfoRepository extends JpaRepository<WorkflowInfoDO, Long> {
List<WorkflowInfoDO> findByAppIdInAndStatusAndTimeExpressionTypeAndNextTriggerTimeLessThanEqual(List<Long> appIds, int status, int timeExpressionType, long time);
/**
* Query all workflows under the specified app
*
* @param appId app ID
* @return all workflow records under that app
*/
List<WorkflowInfoDO> findByAppId(Long appId);
/**
* The three sibling list queries exposed to external paged lookups
*/
Page<WorkflowInfoDO> findByAppIdAndStatusNot(Long appId, int nStatus, Pageable pageable);
Page<WorkflowInfoDO> findByIdAndStatusNot(Long id, int nStatus, Pageable pageable);
Page<WorkflowInfoDO> findByAppIdAndStatusNotAndWfNameLike(Long appId, int nStatus, String condition, Pageable pageable);
}
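A minimal sketch of how a caller might dispatch across the three paged queries above. The wrapper method, its parameters and the deleted-status convention are illustrative assumptions; only the repository methods are taken from this interface.

// Hypothetical dispatcher; assumes org.springframework.data.domain.{Page, Pageable, PageRequest} imports.
Page<WorkflowInfoDO> listWorkflows(WorkflowInfoRepository repo, Long appId, Long wfId,
                                   String keyword, int index, int pageSize, int deletedStatus) {
    Pageable pageable = PageRequest.of(index, pageSize);
    if (wfId != null) {
        // exact ID lookup
        return repo.findByIdAndStatusNot(wfId, deletedStatus, pageable);
    }
    if (keyword != null && !keyword.isEmpty()) {
        // fuzzy name search scoped to the app
        return repo.findByAppIdAndStatusNotAndWfNameLike(appId, deletedStatus, "%" + keyword + "%", pageable);
    }
    // plain listing of the app's workflows
    return repo.findByAppIdAndStatusNot(appId, deletedStatus, pageable);
}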

View File

@ -0,0 +1,56 @@
package tech.powerjob.server.persistence.remote.repository;
import tech.powerjob.server.persistence.remote.model.WorkflowInstanceInfoDO;
import org.springframework.data.jpa.repository.JpaRepository;
import org.springframework.data.jpa.repository.Modifying;
import org.springframework.data.jpa.repository.Query;
import javax.transaction.Transactional;
import java.util.Date;
import java.util.List;
import java.util.Optional;
/**
* Workflow instance data access layer
*
* @author tjq
* @since 2020/5/26
*/
public interface WorkflowInstanceInfoRepository extends JpaRepository<WorkflowInstanceInfoDO, Long> {
/**
* Find the matching workflow instance
* @param wfInstanceId instance ID
* @return workflow instance
*/
Optional<WorkflowInstanceInfoDO> findByWfInstanceId(Long wfInstanceId);
/**
* Delete historical data. JPA's derived delete loops over IDs one by one -- removing 2000 rows took several seconds, which is dismal... hence the bulk statement below.
* The result can only be received as an int.
* @param time update-time threshold
* @param status status list
* @return number of deleted records
*/
@Modifying
@Transactional(rollbackOn = Exception.class)
@Query(value = "delete from WorkflowInstanceInfoDO where gmtModified < ?1 and status in ?2")
int deleteAllByGmtModifiedBeforeAndStatusIn(Date time, List<Integer> status);
/**
* Count the instances of the given workflow that are in the given statuses
* @param workflowId workflow ID
* @param status status list
* @return number of matching instances
*/
int countByWorkflowIdAndStatusIn(Long workflowId, List<Integer> status);
/**
* Load instances whose expected trigger time is below the given threshold
* @param appIds app ID list
* @param status status
* @param time expected-trigger-time threshold
* @return list of workflow instances
*/
List<WorkflowInstanceInfoDO> findByAppIdInAndStatusAndExpectedTriggerTimeLessThan(List<Long> appIds, int status, long time);
}
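A hedged sketch of a history-cleanup caller built on the bulk delete above; the method name, retention parameter and status list are assumptions, only the repository call is from this interface.

// Hypothetical cleanup routine; DateUtils is org.apache.commons.lang3.time.DateUtils.
void cleanWorkflowInstanceHistory(WorkflowInstanceInfoRepository repo, int retentionDays,
                                  List<Integer> finishedStatusList) {
    Date threshold = DateUtils.addDays(new Date(), -retentionDays);
    // single JPQL statement, so no per-ID looping; the affected row count comes back as an int
    int deleted = repo.deleteAllByGmtModifiedBeforeAndStatusIn(threshold, finishedStatusList);
    System.out.println("removed " + deleted + " workflow instances modified before " + threshold);
}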

View File

@ -0,0 +1,49 @@
package tech.powerjob.server.persistence.remote.repository;
import org.springframework.data.jpa.repository.JpaRepository;
import org.springframework.data.jpa.repository.Modifying;
import org.springframework.data.jpa.repository.Query;
import tech.powerjob.server.persistence.remote.model.WorkflowNodeInfoDO;
import javax.transaction.Transactional;
import java.util.Date;
import java.util.List;
/**
* WorkflowNodeInfo data access layer
*
* @author Echo009
* @since 2021/2/1
*/
public interface WorkflowNodeInfoRepository extends JpaRepository<WorkflowNodeInfoDO, Long> {
/**
* Find all nodes of the given workflow
*
* @param workflowId workflow ID
* @return node info list
*/
List<WorkflowNodeInfoDO> findByWorkflowId(Long workflowId);
/**
* Delete the workflow's nodes whose IDs are NOT in the given list
*
* @param workflowId workflow ID
* @param id node IDs to keep
* @return number of deleted records
*/
int deleteByWorkflowIdAndIdNotIn(Long workflowId, List<Long> id);
/**
* Delete node records whose workflow ID is null and whose creation time is earlier than the given threshold
*
* @param crtTimeThreshold creation-time threshold
* @return number of deleted records
*/
@Modifying
@Transactional(rollbackOn = Exception.class)
@Query(value = "delete from WorkflowNodeInfoDO where workflowId is null and gmtCreate < ?1")
int deleteAllByWorkflowIdIsNullAndGmtCreateBefore(Date crtTimeThreshold);
}

View File

@ -0,0 +1,49 @@
package tech.powerjob.server.persistence.storage;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.BeansException;
import org.springframework.beans.factory.DisposableBean;
import org.springframework.context.ApplicationContext;
import org.springframework.context.ApplicationContextAware;
import org.springframework.core.env.Environment;
import tech.powerjob.server.common.aware.ServerInfoAware;
import tech.powerjob.server.common.module.ServerInfo;
import tech.powerjob.server.extension.dfs.DFsService;
/**
* AbstractDFsService
*
* @author tjq
* @since 2023/7/28
*/
@Slf4j
public abstract class AbstractDFsService implements DFsService, ApplicationContextAware, ServerInfoAware, DisposableBean {
protected ServerInfo serverInfo;
protected ApplicationContext applicationContext;
public AbstractDFsService() {
log.info("[DFsService] invoke [{}]'s constructor", this.getClass().getName());
}
protected abstract void init(ApplicationContext applicationContext);
protected static final String PROPERTY_KEY = "oms.storage.dfs";
protected static String fetchProperty(Environment environment, String dfsType, String key) {
String pKey = String.format("%s.%s.%s", PROPERTY_KEY, dfsType, key);
return environment.getProperty(pKey);
}
@Override
public void setApplicationContext(ApplicationContext applicationContext) throws BeansException {
this.applicationContext = applicationContext;
log.info("[DFsService] invoke [{}]'s setApplicationContext", this.getClass().getName());
init(applicationContext);
}
@Override
public void setServerInfo(ServerInfo serverInfo) {
this.serverInfo = serverInfo;
}
}
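fetchProperty above composes keys as oms.storage.dfs.<type>.<key>; a small illustration of the resolution (the alioss/endpoint pair is just an example):

// fetchProperty(environment, "alioss", "endpoint") builds, per the code above:
String pKey = String.format("%s.%s.%s", "oms.storage.dfs", "alioss", "endpoint");
// pKey == "oms.storage.dfs.alioss.endpoint", which is then read via environment.getProperty(pKey)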

View File

@ -0,0 +1,15 @@
package tech.powerjob.server.persistence.storage;
/**
* Constants
*
* @author tjq
* @since 2023/7/30
*/
public class Constants {
public static final String LOG_BUCKET = "log";
public static final String CONTAINER_BUCKET = "container";
}

View File

@ -0,0 +1,53 @@
package tech.powerjob.server.persistence.storage;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Conditional;
import org.springframework.context.annotation.Configuration;
import tech.powerjob.server.extension.dfs.DFsService;
import tech.powerjob.server.persistence.storage.impl.*;
/**
* Initialize the built-in storage services
*
* @author tjq
* @since 2023/7/30
*/
@Configuration
public class StorageConfiguration {
@Bean
@Conditional(GridFsService.GridFsCondition.class)
public DFsService initGridFs() {
return new GridFsService();
}
@Bean
@Conditional(MySqlSeriesDfsService.MySqlSeriesCondition.class)
public DFsService initDbFs() {
return new MySqlSeriesDfsService();
}
@Bean
@Conditional(PostgresqlSeriesDfsService.PostgresqlSeriesCondition.class)
public DFsService initPGDbFs() {
return new PostgresqlSeriesDfsService();
}
@Bean
@Conditional(AliOssService.AliOssCondition.class)
public DFsService initAliOssFs() {
return new AliOssService();
}
@Bean
@Conditional(MinioOssService.MinioOssCondition.class)
public DFsService initMinioOssFs() {
return new MinioOssService();
}
@Bean
@Conditional(EmptyDFsService.EmptyCondition.class)
public DFsService initEmptyDfs() {
return new EmptyDFsService();
}
}
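Every bean above is guarded by a PropertyAndOneBeanCondition subclass, so a backend is only registered when its config key is present and no other DFsService bean exists yet. A hedged sketch of wiring one more backend in the same style; the FTP names and the config key are purely illustrative:

// Hypothetical condition, mirroring the condition subclasses referenced above.
public static class FtpDfsCondition extends PropertyAndOneBeanCondition {
    @Override
    protected List<String> anyConfigKey() {
        // activate only when this (illustrative) key is configured
        return Lists.newArrayList("oms.storage.dfs.ftp.host");
    }
    @Override
    protected Class<?> beanType() {
        // back off if another DFsService bean has already been registered
        return DFsService.class;
    }
}
// and the matching factory method in StorageConfiguration:
// @Bean
// @Conditional(FtpDfsCondition.class)
// public DFsService initFtpFs() { return new FtpDfsService(); }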

View File

@ -0,0 +1,229 @@
package tech.powerjob.server.persistence.storage.impl;
import com.aliyun.oss.OSS;
import com.aliyun.oss.OSSClientBuilder;
import com.aliyun.oss.OSSException;
import com.aliyun.oss.common.auth.CredentialsProvider;
import com.aliyun.oss.common.auth.CredentialsProviderFactory;
import com.aliyun.oss.common.auth.DefaultCredentialProvider;
import com.aliyun.oss.model.DownloadFileRequest;
import com.aliyun.oss.model.ObjectMetadata;
import com.aliyun.oss.model.PutObjectRequest;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import lombok.AllArgsConstructor;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.exception.ExceptionUtils;
import org.springframework.context.ApplicationContext;
import org.springframework.context.annotation.Conditional;
import org.springframework.core.env.Environment;
import tech.powerjob.server.extension.dfs.*;
import tech.powerjob.server.persistence.storage.AbstractDFsService;
import tech.powerjob.server.common.spring.condition.PropertyAndOneBeanCondition;
import javax.annotation.Priority;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import java.util.Optional;
/**
* Alibaba OSS support
* <a href="https://www.aliyun.com/product/oss">massive, secure, low-cost and highly reliable cloud storage</a>
* Configuration items:
* oms.storage.dfs.alioss.endpoint
* oms.storage.dfs.alioss.bucket
* oms.storage.dfs.alioss.credential_type
* oms.storage.dfs.alioss.ak
* oms.storage.dfs.alioss.sk
* oms.storage.dfs.alioss.token
*
* @author tjq
* @since 2023/7/30
*/
@Slf4j
@Priority(value = Integer.MAX_VALUE - 1)
@Conditional(AliOssService.AliOssCondition.class)
public class AliOssService extends AbstractDFsService {
private static final String TYPE_ALI_OSS = "alioss";
private static final String KEY_ENDPOINT = "endpoint";
private static final String KEY_BUCKET = "bucket";
private static final String KEY_CREDENTIAL_TYPE = "credential_type";
private static final String KEY_AK = "ak";
private static final String KEY_SK = "sk";
private static final String KEY_TOKEN = "token";
private OSS oss;
private String bucket;
private static final int DOWNLOAD_PART_SIZE = 10240;
private static final String NO_SUCH_KEY = "NoSuchKey";
@Override
public void store(StoreRequest storeRequest) throws IOException {
ObjectMetadata objectMetadata = new ObjectMetadata();
PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, parseFileName(storeRequest.getFileLocation()), storeRequest.getLocalFile(), objectMetadata);
oss.putObject(putObjectRequest);
}
@Override
public void download(DownloadRequest downloadRequest) throws IOException {
FileLocation dfl = downloadRequest.getFileLocation();
DownloadFileRequest downloadFileRequest = new DownloadFileRequest(bucket, parseFileName(dfl), downloadRequest.getTarget().getAbsolutePath(), DOWNLOAD_PART_SIZE);
try {
FileUtils.forceMkdirParent(downloadRequest.getTarget());
oss.downloadFile(downloadFileRequest);
} catch (Throwable t) {
ExceptionUtils.rethrow(t);
}
}
@Override
public Optional<FileMeta> fetchFileMeta(FileLocation fileLocation) throws IOException {
try {
ObjectMetadata objectMetadata = oss.getObjectMetadata(bucket, parseFileName(fileLocation));
return Optional.ofNullable(objectMetadata).map(ossM -> {
Map<String, Object> metaInfo = Maps.newHashMap();
metaInfo.putAll(ossM.getRawMetadata());
if (ossM.getUserMetadata() != null) {
metaInfo.putAll(ossM.getUserMetadata());
}
return new FileMeta()
.setLastModifiedTime(ossM.getLastModified())
.setLength(ossM.getContentLength())
.setMetaInfo(metaInfo);
});
} catch (OSSException oe) {
String errorCode = oe.getErrorCode();
if (NO_SUCH_KEY.equalsIgnoreCase(errorCode)) {
return Optional.empty();
}
ExceptionUtils.rethrow(oe);
}
return Optional.empty();
}
private static String parseFileName(FileLocation fileLocation) {
return String.format("%s/%s", fileLocation.getBucket(), fileLocation.getName());
}
void initOssClient(String endpoint, String bucket, String mode, String ak, String sk, String token) throws Exception {
log.info("[AliOssService] init OSS by config: endpoint={},bucket={},credentialType={},ak={},sk={},token={}", endpoint, bucket, mode, ak, sk, token);
if (StringUtils.isEmpty(bucket)) {
throw new IllegalArgumentException("'oms.storage.dfs.alioss.bucket' can't be empty, please creat a bucket in aliyun oss console then config it to powerjob");
}
this.bucket = bucket;
CredentialsProvider credentialsProvider;
CredentialType credentialType = CredentialType.parse(mode);
switch (credentialType) {
case PWD:
credentialsProvider = new DefaultCredentialProvider(ak, sk, token);
break;
case SYSTEM_PROPERTY:
credentialsProvider = CredentialsProviderFactory.newSystemPropertiesCredentialsProvider();
break;
default:
credentialsProvider = CredentialsProviderFactory.newEnvironmentVariableCredentialsProvider();
}
this.oss = new OSSClientBuilder().build(endpoint, credentialsProvider);
log.info("[AliOssService] initialize successfully, THIS_WILL_BE_THE_STORAGE_LAYER.");
}
@Override
public void cleanExpiredFiles(String bucket, int days) {
/*
Ali OSS ships with built-in lifecycle management; please configure expiration there per the docs instead of re-implementing it in code and wasting server resources: https://help.aliyun.com/zh/oss/user-guide/overview-54
*/
}
@Override
public void destroy() throws Exception {
oss.shutdown();
}
@Override
protected void init(ApplicationContext applicationContext) {
Environment environment = applicationContext.getEnvironment();
String endpoint = fetchProperty(environment, TYPE_ALI_OSS, KEY_ENDPOINT);
String bkt = fetchProperty(environment, TYPE_ALI_OSS, KEY_BUCKET);
String ct = fetchProperty(environment, TYPE_ALI_OSS, KEY_CREDENTIAL_TYPE);
String ak = fetchProperty(environment, TYPE_ALI_OSS, KEY_AK);
String sk = fetchProperty(environment, TYPE_ALI_OSS, KEY_SK);
String token = fetchProperty(environment, TYPE_ALI_OSS, KEY_TOKEN);
try {
initOssClient(endpoint, bkt, ct, ak, sk, token);
} catch (Exception e) {
ExceptionUtils.rethrow(e);
}
}
@Getter
@AllArgsConstructor
enum CredentialType {
/**
* read credentials from environment variables
*/
ENV("env"),
/**
* read credentials from system properties
*/
SYSTEM_PROPERTY("sys"),
/**
* read credentials from the configured ak/sk pair
*/
PWD("pwd")
;
private final String code;
/**
* parse credential type
* @param mode oms.storage.dfs.alioss.credential_type
* @return CredentialType
*/
public static CredentialType parse(String mode) {
for (CredentialType credentialType : values()) {
if (StringUtils.equalsIgnoreCase(credentialType.code, mode)) {
return credentialType;
}
}
return PWD;
}
}
public static class AliOssCondition extends PropertyAndOneBeanCondition {
@Override
protected List<String> anyConfigKey() {
return Lists.newArrayList("oms.storage.dfs.alioss.endpoint");
}
@Override
protected Class<?> beanType() {
return DFsService.class;
}
}
}
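A few assertions pinning down the fallback behaviour of CredentialType.parse above; they follow directly from the implementation (PWD is returned whenever the mode is null or unrecognised):

// From code in the same package, e.g. a unit test:
assert AliOssService.CredentialType.parse("env") == AliOssService.CredentialType.ENV;
assert AliOssService.CredentialType.parse("SYS") == AliOssService.CredentialType.SYSTEM_PROPERTY; // case-insensitive
assert AliOssService.CredentialType.parse(null) == AliOssService.CredentialType.PWD;       // null matches nothing, default wins
assert AliOssService.CredentialType.parse("whatever") == AliOssService.CredentialType.PWD; // unknown mode, default wins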

View File

@ -0,0 +1,61 @@
package tech.powerjob.server.persistence.storage.impl;
import lombok.extern.slf4j.Slf4j;
import org.springframework.context.ApplicationContext;
import org.springframework.context.annotation.Conditional;
import tech.powerjob.server.extension.dfs.*;
import tech.powerjob.server.persistence.storage.AbstractDFsService;
import tech.powerjob.server.common.spring.condition.PropertyAndOneBeanCondition;
import javax.annotation.Priority;
import java.io.IOException;
import java.util.List;
import java.util.Optional;
/**
* EmptyDFsService
*
* @author tjq
* @since 2023/7/30
*/
@Slf4j
@Priority(value = Integer.MAX_VALUE)
@Conditional(EmptyDFsService.EmptyCondition.class)
public class EmptyDFsService extends AbstractDFsService {
@Override
public void store(StoreRequest storeRequest) throws IOException {
}
@Override
public void download(DownloadRequest downloadRequest) throws IOException {
}
@Override
public Optional<FileMeta> fetchFileMeta(FileLocation fileLocation) throws IOException {
return Optional.empty();
}
@Override
public void destroy() throws Exception {
}
@Override
protected void init(ApplicationContext applicationContext) {
log.info("[EmptyDFsService] initialize successfully, THIS_WILL_BE_THE_STORAGE_LAYER.");
}
public static class EmptyCondition extends PropertyAndOneBeanCondition {
@Override
protected List<String> anyConfigKey() {
return null;
}
@Override
protected Class<?> beanType() {
return DFsService.class;
}
}
}

View File

@ -0,0 +1,176 @@
package tech.powerjob.server.persistence.storage.impl;
import com.google.common.base.Stopwatch;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.mongodb.ConnectionString;
import com.mongodb.client.MongoClient;
import com.mongodb.client.MongoClients;
import com.mongodb.client.MongoDatabase;
import com.mongodb.client.gridfs.GridFSBucket;
import com.mongodb.client.gridfs.GridFSBuckets;
import com.mongodb.client.gridfs.GridFSDownloadStream;
import com.mongodb.client.gridfs.GridFSFindIterable;
import com.mongodb.client.gridfs.model.GridFSFile;
import com.mongodb.client.model.Filters;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.bson.conversions.Bson;
import org.bson.types.ObjectId;
import org.springframework.context.ApplicationContext;
import org.springframework.context.annotation.Conditional;
import org.springframework.core.env.Environment;
import tech.powerjob.server.extension.dfs.*;
import tech.powerjob.server.persistence.storage.AbstractDFsService;
import tech.powerjob.server.common.spring.condition.PropertyAndOneBeanCondition;
import javax.annotation.Priority;
import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.IOException;
import java.nio.file.Files;
import java.util.Date;
import java.util.List;
import java.util.Map;
import java.util.Optional;
/**
* Use MongoDB GridFS as the underlying storage
* Configuration: oms.storage.dfs.mongodb.uri=mongodb+srv://zqq:No1Bug2Please3!@cluster0.wie54.gcp.mongodb.net/powerjob_daily?retryWrites=true&w=majority
*
* @author tjq
* @since 2023/7/28
*/
@Slf4j
@Priority(value = Integer.MAX_VALUE - 10)
@Conditional(GridFsService.GridFsCondition.class)
public class GridFsService extends AbstractDFsService {
private MongoClient mongoClient;
private MongoDatabase db;
private final Map<String, GridFSBucket> bucketCache = Maps.newConcurrentMap();
private static final String TYPE_MONGO = "mongodb";
private static final String KEY_URI = "uri";
private static final String SPRING_MONGO_DB_CONFIG_KEY = "spring.data.mongodb.uri";
@Override
public void store(StoreRequest storeRequest) throws IOException {
GridFSBucket bucket = getBucket(storeRequest.getFileLocation().getBucket());
try (BufferedInputStream bis = new BufferedInputStream(Files.newInputStream(storeRequest.getLocalFile().toPath()))) {
bucket.uploadFromStream(storeRequest.getFileLocation().getName(), bis);
}
}
@Override
public void download(DownloadRequest downloadRequest) throws IOException {
GridFSBucket bucket = getBucket(downloadRequest.getFileLocation().getBucket());
FileUtils.forceMkdirParent(downloadRequest.getTarget());
try (GridFSDownloadStream gis = bucket.openDownloadStream(downloadRequest.getFileLocation().getName());
BufferedOutputStream bos = new BufferedOutputStream(Files.newOutputStream(downloadRequest.getTarget().toPath()))
) {
byte[] buffer = new byte[1024];
int bytes = 0;
while ((bytes = gis.read(buffer)) != -1) {
bos.write(buffer, 0, bytes);
}
bos.flush();
}
}
@Override
public Optional<FileMeta> fetchFileMeta(FileLocation fileLocation) throws IOException {
GridFSBucket bucket = getBucket(fileLocation.getBucket());
GridFSFindIterable files = bucket.find(Filters.eq("filename", fileLocation.getName()));
GridFSFile first = files.first();
if (first == null) {
return Optional.empty();
}
return Optional.of(new FileMeta()
.setLength(first.getLength())
.setLastModifiedTime(first.getUploadDate())
.setMetaInfo(first.getMetadata()));
}
@Override
public void cleanExpiredFiles(String bucketName, int days) {
Stopwatch sw = Stopwatch.createStarted();
Date date = DateUtils.addDays(new Date(), -days);
GridFSBucket bucket = getBucket(bucketName);
Bson filter = Filters.lt("uploadDate", date);
// Think looping deletes perform badly? Have a look at the official implementation first [grin]: org.springframework.data.mongodb.gridfs.GridFsTemplate.delete does the same.
bucket.find(filter).forEach(gridFSFile -> {
ObjectId objectId = gridFSFile.getObjectId();
try {
bucket.delete(objectId);
log.info("[GridFsService] deleted {}#{}", bucketName, objectId);
} catch (Exception e) {
log.error("[GridFsService] deleted {}#{} failed.", bucketName, objectId, e);
}
});
log.info("[GridFsService] clean bucket({}) successfully, delete all files before {}, using {}.", bucketName, date, sw.stop());
}
private GridFSBucket getBucket(String bucketName) {
return bucketCache.computeIfAbsent(bucketName, ignore -> GridFSBuckets.create(db, bucketName));
}
private String parseMongoUri(Environment environment) {
// prefer the new-style key
String uri = fetchProperty(environment, TYPE_MONGO, KEY_URI);
if (StringUtils.isNotEmpty(uri)) {
return uri;
}
// compatibility with pre-4.3.3 behaviour: fall back to the Spring MongoDB config
return environment.getProperty(SPRING_MONGO_DB_CONFIG_KEY);
}
void initMongo(String uri) {
log.info("[GridFsService] mongoDB uri: {}", uri);
if (StringUtils.isEmpty(uri)) {
log.warn("[GridFsService] uri is empty, GridFsService is off now!");
return;
}
ConnectionString connectionString = new ConnectionString(uri);
mongoClient = MongoClients.create(connectionString);
if (StringUtils.isEmpty(connectionString.getDatabase())) {
log.warn("[GridFsService] can't find database info from uri, will use [powerjob] as default, please make sure you have created the database 'powerjob'");
}
db = mongoClient.getDatabase(Optional.ofNullable(connectionString.getDatabase()).orElse("powerjob"));
log.info("[GridFsService] initialize MongoDB and GridFS successfully, will use mongodb GridFs as storage layer.");
}
@Override
public void destroy() throws Exception {
mongoClient.close();
}
@Override
protected void init(ApplicationContext applicationContext) {
String uri = parseMongoUri(applicationContext.getEnvironment());
initMongo(uri);
log.info("[GridFsService] initialize successfully, THIS_WILL_BE_THE_STORAGE_LAYER.");
}
public static class GridFsCondition extends PropertyAndOneBeanCondition {
@Override
protected List<String> anyConfigKey() {
return Lists.newArrayList("spring.data.mongodb.uri", "oms.storage.dfs.mongodb.uri");
}
@Override
protected Class<?> beanType() {
return DFsService.class;
}
}
}
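A small usage sketch grounded in initMongo above: when the connection string carries no database path, the service logs a warning and falls back to the powerjob database. The URI below is a placeholder, and the call assumes package-local access, as the tests in this commit use.

// Illustrative bootstrap (package-local, mirroring GridFsServiceTest):
GridFsService gridFs = new GridFsService();
gridFs.initMongo("mongodb://localhost:27017"); // no "/dbname" -> warning, defaults to "powerjob"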

View File

@ -0,0 +1,258 @@
package tech.powerjob.server.persistence.storage.impl;
import com.amazonaws.auth.AWSStaticCredentialsProvider;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.client.builder.AwsClientBuilder;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.*;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import lombok.SneakyThrows;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.exception.ExceptionUtils;
import org.springframework.context.ApplicationContext;
import org.springframework.context.annotation.Conditional;
import org.springframework.core.env.Environment;
import tech.powerjob.server.common.spring.condition.PropertyAndOneBeanCondition;
import tech.powerjob.server.extension.dfs.*;
import tech.powerjob.server.persistence.storage.AbstractDFsService;
import javax.annotation.Priority;
import java.util.List;
import java.util.Map;
import java.util.Optional;
/**
* MINIO support
* <a href="https://min.io/">High Performance Object Storage</a>
* Configuration items:
* oms.storage.dfs.minio.endpoint
* oms.storage.dfs.minio.bucketName
* oms.storage.dfs.minio.accessKey
* oms.storage.dfs.minio.secretKey
*
* @author xinyi
* @since 2023/8/21
*/
@Slf4j
@Priority(value = Integer.MAX_VALUE - 3)
@Conditional(MinioOssService.MinioOssCondition.class)
public class MinioOssService extends AbstractDFsService {
private static final String TYPE_MINIO = "minio";
private static final String KEY_ENDPOINT = "endpoint";
private static final String KEY_BUCKET_NAME = "bucketName";
private static final String ACCESS_KEY = "accessKey";
private static final String SECRET_KEY = "secretKey";
private AmazonS3 amazonS3;
private String bucket;
private static final String NOT_FOUND = "404 Not Found";
@Override
public void store(StoreRequest storeRequest) {
try {
String fileName = parseFileName(storeRequest.getFileLocation());
// build the PutObjectRequest
PutObjectRequest request = new PutObjectRequest(this.bucket, fileName, storeRequest.getLocalFile());
amazonS3.putObject(request);
} catch (Throwable t) {
ExceptionUtils.rethrow(t);
}
}
@Override
public void download(DownloadRequest downloadRequest) {
try {
FileUtils.forceMkdirParent(downloadRequest.getTarget());
String fileName = parseFileName(downloadRequest.getFileLocation());
GetObjectRequest getObjectRequest = new GetObjectRequest(this.bucket, fileName);
amazonS3.getObject(getObjectRequest, downloadRequest.getTarget());
} catch (Throwable t) {
ExceptionUtils.rethrow(t);
}
}
/**
* Fetch file metadata
*
* @param fileLocation file location
*/
@Override
public Optional<FileMeta> fetchFileMeta(FileLocation fileLocation) {
try {
String fileName = parseFileName(fileLocation);
ObjectMetadata objectMetadata = amazonS3.getObjectMetadata(this.bucket, fileName);
return Optional.ofNullable(objectMetadata).map(minioStat -> {
Map<String, Object> metaInfo = Maps.newHashMap();
if (objectMetadata.getRawMetadata() != null) {
metaInfo.putAll(objectMetadata.getRawMetadata());
}
return new FileMeta()
.setLastModifiedTime(objectMetadata.getLastModified())
.setLength(objectMetadata.getContentLength())
.setMetaInfo(metaInfo);
});
} catch (AmazonS3Exception s3Exception) {
String errorCode = s3Exception.getErrorCode();
if (NOT_FOUND.equalsIgnoreCase(errorCode)) {
return Optional.empty();
}
} catch (Exception oe) {
ExceptionUtils.rethrow(oe);
}
return Optional.empty();
}
private static String parseFileName(FileLocation fileLocation) {
return String.format("%s/%s", fileLocation.getBucket(), fileLocation.getName());
}
/**
* Clean up expired files
*
* @param bucket bucket name
* @param days retention in days
*/
@Override
public void cleanExpiredFiles(String bucket, int days) {
/*
Set object lifecycle rules via the MinIO console or the MinIO client CLI, defining the expiration there; MinIO will then delete expired files automatically.
*/
}
/**
* Release the connection
*/
@Override
public void destroy() {
//minioClient.close();
}
/**
* Initialize MinIO
*
* @param applicationContext /
*/
@Override
protected void init(ApplicationContext applicationContext) {
Environment environment = applicationContext.getEnvironment();
String endpoint = fetchProperty(environment, TYPE_MINIO, KEY_ENDPOINT);
String bucketName = fetchProperty(environment, TYPE_MINIO, KEY_BUCKET_NAME);
String accessKey = fetchProperty(environment, TYPE_MINIO, ACCESS_KEY);
String secretKey = fetchProperty(environment, TYPE_MINIO, SECRET_KEY);
try {
initOssClient(endpoint, bucketName, accessKey, secretKey);
} catch (Exception e) {
ExceptionUtils.rethrow(e);
}
}
/**
* Create the MinIO connection and the bucket
*
* @param endpoint endpoint address
* @param bucketName bucket name
* @param accessKey access key
* @param secretKey secret key
*/
public void initOssClient(String endpoint, String bucketName, String accessKey, String secretKey) {
log.info("[Minio] init OSS by config: endpoint={}, bucketName={}, accessKey={}, secretKey={}", endpoint, bucketName, accessKey, secretKey);
if (StringUtils.isEmpty(bucketName)) {
throw new IllegalArgumentException("'oms.storage.dfs.minio.bucketName' can't be empty, please creat a bucket in minio oss console then config it to powerjob");
}
// build the credentials object
BasicAWSCredentials awsCreds = new BasicAWSCredentials(accessKey, secretKey);
// build the AmazonS3 client with the endpoint and credentials
this.amazonS3 = AmazonS3ClientBuilder.standard()
// When the AWS Java SDK connects to a non-AWS, S3-compatible service such as MinIO, a region must still be specified: the client builder needs one to configure its service endpoint, even though it has no real meaning for the MinIO instance. "us-east-1" is the common placeholder; it does not affect the connection or data transfer, since the actual address is determined by the endpoint URL you provide.
.withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration(endpoint, "us-east-1"))
.withCredentials(new AWSStaticCredentialsProvider(awsCreds))
.withPathStyleAccessEnabled(true) // important: enable path-style access
.build();
this.bucket = bucketName;
createBucket(bucketName);
log.info("[Minio] initialize OSS successfully!");
}
/**
* Create the bucket
*
* @param bucketName bucket name
*/
@SneakyThrows(Exception.class)
public void createBucket(String bucketName) {
// you are advised to create the bucket yourself and configure the relevant policies up front
if (bucketExists(bucketName)) {
return;
}
Bucket createBucketResult = amazonS3.createBucket(bucketName);
log.info("[Minio] createBucket successfully, bucketName: {}, createResult: {}", bucketName, createBucketResult);
String policy = "{\n" +
" \"Version\": \"2012-10-17\",\n" +
" \"Statement\": [\n" +
" {\n" +
" \"Action\": [\n" +
" \"s3:GetObject\"\n" +
" ],\n" +
" \"Effect\": \"Allow\",\n" +
" \"Principal\": {\n" +
" \"AWS\": [\n" +
" \"*\"\n" +
" ]\n" +
" },\n" +
" \"Resource\": [\n" +
" \"arn:aws:s3:::" + bucketName + "/*\"\n" +
" ]\n" +
" }\n" +
" ]\n" +
"}";
try {
amazonS3.setBucketPolicy(bucketName, policy);
} catch (Exception e) {
log.warn("[Minio] setBucketPolicy failed, maybe you need to setBucketPolicy by yourself!", e);
}
}
/**
* Check whether the bucket exists
*
* @param bucketName bucket name
* @return boolean
*/
@SneakyThrows(Exception.class)
public boolean bucketExists(String bucketName) {
return amazonS3.doesBucketExistV2(bucketName);
}
public static class MinioOssCondition extends PropertyAndOneBeanCondition {
@Override
protected List<String> anyConfigKey() {
return Lists.newArrayList("oms.storage.dfs.minio.endpoint");
}
@Override
protected Class<?> beanType() {
return DFsService.class;
}
}
}

View File

@ -0,0 +1,350 @@
package tech.powerjob.server.persistence.storage.impl;
import com.google.common.base.Stopwatch;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.zaxxer.hikari.HikariConfig;
import com.zaxxer.hikari.HikariDataSource;
import lombok.Data;
import lombok.experimental.Accessors;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.exception.ExceptionUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.springframework.context.ApplicationContext;
import org.springframework.context.annotation.Conditional;
import org.springframework.core.env.Environment;
import tech.powerjob.common.serialize.JsonUtils;
import tech.powerjob.common.utils.CommonUtils;
import tech.powerjob.common.enums.SwitchableStatus;
import tech.powerjob.server.common.spring.condition.PropertyAndOneBeanCondition;
import tech.powerjob.server.extension.dfs.*;
import tech.powerjob.server.persistence.storage.AbstractDFsService;
import javax.annotation.Priority;
import javax.sql.DataSource;
import java.io.BufferedInputStream;
import java.io.IOException;
import java.nio.file.Files;
import java.sql.*;
import java.util.List;
import java.util.Map;
import java.util.Optional;
/**
* Storage backed by MySQL-like databases.
* PS1. Uploading large files may hit the max_allowed_packet limit; relax it via: set global max_allowed_packet = 500*1024*1024
* PS2. Officially tested against MySQL only; please verify other databases yourself before use.
* PS3. A database is not suited to large-scale file storage. This extension targets simple setups only; for heavy scenarios pick another backend (OSS, MongoDB, etc.).
* ********************* Configuration *********************
* oms.storage.dfs.mysql_series.driver
* oms.storage.dfs.mysql_series.url
* oms.storage.dfs.mysql_series.username
* oms.storage.dfs.mysql_series.password
* oms.storage.dfs.mysql_series.auto_create_table
* oms.storage.dfs.mysql_series.table_name
*
* @author tjq
* @since 2023/8/9
*/
@Slf4j
@Priority(value = Integer.MAX_VALUE - 2)
@Conditional(MySqlSeriesDfsService.MySqlSeriesCondition.class)
public class MySqlSeriesDfsService extends AbstractDFsService {
private DataSource dataSource;
private static final String TYPE_MYSQL = "mysql_series";
/**
* Database driver; for MySQL 8 use com.mysql.cj.jdbc.Driver
*/
private static final String KEY_DRIVER_NAME = "driver";
/**
* Database URL, e.g. jdbc:mysql://localhost:3306/powerjob-daily?useUnicode=true&characterEncoding=UTF-8&serverTimezone=Asia/Shanghai
*/
private static final String KEY_URL = "url";
/**
* Database username, e.g. root
*/
private static final String KEY_USERNAME = "username";
/**
* Database password
*/
private static final String KEY_PASSWORD = "password";
/**
* Whether to auto-create the table
*/
private static final String KEY_AUTO_CREATE_TABLE = "auto_create_table";
/**
* Table name
*/
private static final String KEY_TABLE_NAME = "table_name";
/* ********************* SQL region ********************* */
private static final String DEFAULT_TABLE_NAME = "powerjob_files";
private static final String CREATE_TABLE_SQL = "CREATE TABLE\n" +
"IF\n" +
"\tNOT EXISTS %s (\n" +
"\t\t`id` BIGINT NOT NULL AUTO_INCREMENT COMMENT 'ID',\n" +
"\t\t`bucket` VARCHAR ( 255 ) NOT NULL COMMENT '分桶',\n" +
"\t\t`name` VARCHAR ( 255 ) NOT NULL COMMENT '文件名称',\n" +
"\t\t`version` VARCHAR ( 255 ) NOT NULL COMMENT '版本',\n" +
"\t\t`meta` VARCHAR ( 255 ) COMMENT '元数据',\n" +
"\t\t`length` BIGINT NOT NULL COMMENT '长度',\n" +
"\t\t`status` INT NOT NULL COMMENT '状态',\n" +
"\t\t`data` LONGBLOB NOT NULL COMMENT '文件内容',\n" +
"\t\t`extra` VARCHAR ( 255 ) COMMENT '其他信息',\n" +
"\t\t`gmt_create` DATETIME NOT NULL COMMENT '创建时间',\n" +
"\t\t`gmt_modified` DATETIME COMMENT '更新时间',\n" +
"\tPRIMARY KEY ( id ) \n" +
"\t);";
private static final String INSERT_SQL = "insert into %s(bucket, name, version, meta, length, status, data, extra, gmt_create, gmt_modified) values (?,?,?,?,?,?,?,?,?,?);";
private static final String DELETE_SQL = "DELETE FROM %s ";
private static final String QUERY_FULL_SQL = "select * from %s";
private static final String QUERY_META_SQL = "select bucket, name, version, meta, length, status, extra, gmt_create, gmt_modified from %s";
private void deleteByLocation(FileLocation fileLocation) {
String dSQLPrefix = fullSQL(DELETE_SQL);
String dSQL = dSQLPrefix.concat(whereSQL(fileLocation));
executeDelete(dSQL);
}
private void executeDelete(String sql) {
try (Connection con = dataSource.getConnection()) {
con.createStatement().executeUpdate(sql);
} catch (Exception e) {
log.error("[MySqlSeriesDfsService] executeDelete failed, sql: {}", sql);
}
}
@Override
public void store(StoreRequest storeRequest) throws IOException {
Stopwatch sw = Stopwatch.createStarted();
String insertSQL = fullSQL(INSERT_SQL);
FileLocation fileLocation = storeRequest.getFileLocation();
// overwrite semantics: delete any existing record before writing
deleteByLocation(fileLocation);
Map<String, Object> meta = Maps.newHashMap();
meta.put("_server_", serverInfo.getIp());
meta.put("_local_file_path_", storeRequest.getLocalFile().getAbsolutePath());
BufferedInputStream bufferedInputStream = new BufferedInputStream(Files.newInputStream(storeRequest.getLocalFile().toPath()));
Date date = new Date(System.currentTimeMillis());
try (Connection con = dataSource.getConnection()) {
PreparedStatement pst = con.prepareStatement(insertSQL);
pst.setString(1, fileLocation.getBucket());
pst.setString(2, fileLocation.getName());
pst.setString(3, "mu");
pst.setString(4, JsonUtils.toJSONString(meta));
pst.setLong(5, storeRequest.getLocalFile().length());
pst.setInt(6, SwitchableStatus.ENABLE.getV());
pst.setBlob(7, bufferedInputStream);
pst.setString(8, null);
pst.setDate(9, date);
pst.setDate(10, date);
pst.execute();
log.info("[MySqlSeriesDfsService] store [{}] successfully, cost: {}", fileLocation, sw);
} catch (Exception e) {
log.error("[MySqlSeriesDfsService] store [{}] failed!", fileLocation);
ExceptionUtils.rethrow(e);
} finally {
bufferedInputStream.close();
}
}
@Override
public void download(DownloadRequest downloadRequest) throws IOException {
Stopwatch sw = Stopwatch.createStarted();
String querySQL = fullSQL(QUERY_FULL_SQL);
FileLocation fileLocation = downloadRequest.getFileLocation();
FileUtils.forceMkdirParent(downloadRequest.getTarget());
try (Connection con = dataSource.getConnection()) {
ResultSet resultSet = con.createStatement().executeQuery(querySQL.concat(whereSQL(fileLocation)));
boolean exist = resultSet.next();
if (!exist) {
log.warn("[MySqlSeriesDfsService] download file[{}] failed due to not exits!", fileLocation);
return;
}
Blob dataBlob = resultSet.getBlob("data");
FileUtils.copyInputStreamToFile(new BufferedInputStream(dataBlob.getBinaryStream()), downloadRequest.getTarget());
log.info("[MySqlSeriesDfsService] download [{}] successfully, cost: {}", fileLocation, sw);
} catch (Exception e) {
log.error("[MySqlSeriesDfsService] download file [{}] failed!", fileLocation, e);
ExceptionUtils.rethrow(e);
}
}
@Override
public Optional<FileMeta> fetchFileMeta(FileLocation fileLocation) throws IOException {
String querySQL = fullSQL(QUERY_META_SQL);
try (Connection con = dataSource.getConnection()) {
ResultSet resultSet = con.createStatement().executeQuery(querySQL.concat(whereSQL(fileLocation)));
boolean exist = resultSet.next();
if (!exist) {
return Optional.empty();
}
FileMeta fileMeta = new FileMeta()
.setLength(resultSet.getLong("length"))
.setLastModifiedTime(resultSet.getDate("gmt_modified"))
.setMetaInfo(JsonUtils.parseMap(resultSet.getString("meta")));
return Optional.of(fileMeta);
} catch (Exception e) {
log.error("[MySqlSeriesDfsService] fetchFileMeta [{}] failed!", fileLocation);
ExceptionUtils.rethrow(e);
}
return Optional.empty();
}
@Override
public void cleanExpiredFiles(String bucket, int days) {
// Although server-side cleanup is offered, users are strongly advised to configure a cleanup event directly at the database level instead!
String dSQLPrefix = fullSQL(DELETE_SQL);
final long targetTs = DateUtils.addDays(new Date(System.currentTimeMillis()), -days).getTime();
final String targetDeleteTime = CommonUtils.formatTime(targetTs);
log.info("[MySqlSeriesDfsService] start to cleanExpiredFiles, targetDeleteTime: {}", targetDeleteTime);
String fSQL = dSQLPrefix.concat(String.format(" where gmt_modified < '%s'", targetDeleteTime));
log.info("[MySqlSeriesDfsService] cleanExpiredFiles SQL: {}", fSQL);
executeDelete(fSQL);
}
@Override
protected void init(ApplicationContext applicationContext) {
Environment env = applicationContext.getEnvironment();
MySQLProperty mySQLProperty = new MySQLProperty()
.setDriver(fetchProperty(env, TYPE_MYSQL, KEY_DRIVER_NAME))
.setUrl(fetchProperty(env, TYPE_MYSQL, KEY_URL))
.setUsername(fetchProperty(env, TYPE_MYSQL, KEY_USERNAME))
.setPassword(fetchProperty(env, TYPE_MYSQL, KEY_PASSWORD))
.setAutoCreateTable(Boolean.TRUE.toString().equalsIgnoreCase(fetchProperty(env, TYPE_MYSQL, KEY_AUTO_CREATE_TABLE)))
;
try {
initDatabase(mySQLProperty);
initTable(mySQLProperty);
} catch (Exception e) {
log.error("[MySqlSeriesDfsService] init datasource failed!", e);
ExceptionUtils.rethrow(e);
}
log.info("[MySqlSeriesDfsService] initialize successfully, THIS_WILL_BE_THE_STORAGE_LAYER.");
}
void initDatabase(MySQLProperty property) {
log.info("[MySqlSeriesDfsService] init datasource by config: {}", property);
HikariConfig config = new HikariConfig();
config.setDriverClassName(property.driver);
config.setJdbcUrl(property.url);
config.setUsername(property.username);
config.setPassword(property.password);
config.setAutoCommit(true);
// minimum number of idle connections in the pool
config.setMinimumIdle(2);
// maximum pool size
config.setMaximumPoolSize(32);
dataSource = new HikariDataSource(config);
}
void initTable(MySQLProperty property) throws Exception {
if (property.autoCreateTable) {
String createTableSQL = fullSQL(CREATE_TABLE_SQL);
log.info("[MySqlSeriesDfsService] use create table SQL: {}", createTableSQL);
try (Connection connection = dataSource.getConnection()) {
connection.createStatement().execute(createTableSQL);
log.info("[MySqlSeriesDfsService] auto create table successfully!");
}
}
}
private String fullSQL(String sql) {
return String.format(sql, parseTableName());
}
private String parseTableName() {
// do not remove: keeps local unit tests (which run without a Spring context) working
if (applicationContext == null) {
return DEFAULT_TABLE_NAME;
}
String tableName = fetchProperty(applicationContext.getEnvironment(), TYPE_MYSQL, KEY_TABLE_NAME);
return StringUtils.isEmpty(tableName) ? DEFAULT_TABLE_NAME : tableName;
}
private static String whereSQL(FileLocation fileLocation) {
return String.format(" where bucket='%s' AND name='%s' ", fileLocation.getBucket(), fileLocation.getName());
}
@Override
public void destroy() throws Exception {
}
@Data
@Accessors(chain = true)
static class MySQLProperty {
private String driver;
private String url;
private String username;
private String password;
private boolean autoCreateTable;
}
public static class MySqlSeriesCondition extends PropertyAndOneBeanCondition {
@Override
protected List<String> anyConfigKey() {
return Lists.newArrayList("oms.storage.dfs.mysql_series.url");
}
@Override
protected Class<?> beanType() {
return DFsService.class;
}
}
}
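To make the SQL assembly above concrete, this is what the helpers produce with the default table name; the file location values are made up:

// Inside this class, fullSQL(...) and whereSQL(...) compose like so:
FileLocation loc = new FileLocation().setBucket("log").setName("10086.log"); // illustrative values
String sql = String.format(DELETE_SQL, DEFAULT_TABLE_NAME).concat(whereSQL(loc));
// sql == "DELETE FROM powerjob_files  where bucket='log' AND name='10086.log' "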

View File

@ -0,0 +1,406 @@
package tech.powerjob.server.persistence.storage.impl;
import com.google.common.base.Stopwatch;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.zaxxer.hikari.HikariConfig;
import com.zaxxer.hikari.HikariDataSource;
import lombok.Data;
import lombok.experimental.Accessors;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.exception.ExceptionUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.springframework.context.ApplicationContext;
import org.springframework.context.annotation.Conditional;
import org.springframework.core.env.Environment;
import tech.powerjob.common.enums.SwitchableStatus;
import tech.powerjob.common.serialize.JsonUtils;
import tech.powerjob.common.utils.CommonUtils;
import tech.powerjob.server.common.spring.condition.PropertyAndOneBeanCondition;
import tech.powerjob.server.extension.dfs.*;
import tech.powerjob.server.persistence.storage.AbstractDFsService;
import javax.annotation.Priority;
import javax.sql.DataSource;
import java.io.BufferedInputStream;
import java.io.ByteArrayOutputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.file.Files;
import java.sql.*;
import java.util.List;
import java.util.Map;
import java.util.Optional;
/**
* PostgreSQL-backed database storage (developed against version 14)
* ********************* Configuration *********************
* oms.storage.dfs.postgresql_series.driver
* oms.storage.dfs.postgresql_series.url
* oms.storage.dfs.postgresql_series.username
* oms.storage.dfs.postgresql_series.password
* oms.storage.dfs.postgresql_series.auto_create_table
* oms.storage.dfs.postgresql_series.table_name
*
* @author jetol
* @since 2024-1-8
*/
@Slf4j
@Priority(value = Integer.MAX_VALUE - 4)
@Conditional(PostgresqlSeriesDfsService.PostgresqlSeriesCondition.class)
public class PostgresqlSeriesDfsService extends AbstractDFsService {
private DataSource dataSource;
private static final String TYPE_POSTGRESQL = "postgresql_series";
/**
* Database driver; for PostgreSQL use org.postgresql.Driver
*/
private static final String KEY_DRIVER_NAME = "driver";
/**
* Database URL, e.g. jdbc:postgresql://localhost:5432/powerjob-daily
*/
private static final String KEY_URL = "url";
/**
* Database username, e.g. root
*/
private static final String KEY_USERNAME = "username";
/**
* Database password
*/
private static final String KEY_PASSWORD = "password";
/**
* Whether to auto-create the table
*/
private static final String KEY_AUTO_CREATE_TABLE = "auto_create_table";
/**
* Table name
*/
private static final String KEY_TABLE_NAME = "table_name";
/* ********************* SQL region ********************* */
private static final String DEFAULT_TABLE_NAME = "powerjob_files";
private static final String POWERJOB_FILES_ID_SEQ = "CREATE SEQUENCE if not exists powerjob_files_id_seq\n" +
" START WITH 1\n" +
" INCREMENT BY 1\n" +
" NO MINVALUE\n" +
" NO MAXVALUE\n" +
" CACHE 1;" ;
private static final String CREATE_TABLE_SQL = "CREATE TABLE if not exists powerjob_files (\n" +
" id bigint NOT NULL DEFAULT nextval('powerjob_files_id_seq') PRIMARY KEY,\n" +
" bucket varchar(255) NOT NULL,\n" +
" name varchar(255) NOT NULL,\n" +
" version varchar(255) NOT NULL,\n" +
" meta varchar(255) NULL DEFAULT NULL,\n" +
" length bigint NOT NULL,\n" +
" status int NOT NULL,\n" +
" data bytea NOT NULL,\n" +
" extra varchar(255) NULL DEFAULT NULL,\n" +
" gmt_create timestamp without time zone NOT NULL,\n" +
" gmt_modified timestamp without time zone NULL DEFAULT NULL\n" +
");";
private static final String INSERT_SQL = "insert into %s(bucket, name, version, meta, length, status, data, extra, gmt_create, gmt_modified) values (?,?,?,?,?,?,?,?,?,?);";
private static final String DELETE_SQL = "DELETE FROM %s ";
private static final String QUERY_FULL_SQL = "select * from %s";
private static final String QUERY_META_SQL = "select bucket, name, version, meta, length, status, extra, gmt_create, gmt_modified from %s";
private void deleteByLocation(FileLocation fileLocation) {
String dSQLPrefix = fullSQL(DELETE_SQL);
String dSQL = dSQLPrefix.concat(whereSQL(fileLocation));
executeDelete(dSQL);
}
private void executeDelete(String sql) {
try (Connection con = dataSource.getConnection()) {
con.createStatement().executeUpdate(sql);
} catch (Exception e) {
log.error("[PostgresqlSeriesDfsService] executeDelete failed, sql: {}", sql);
}
}
@Override
public void store(StoreRequest storeRequest) throws IOException, SQLException {
Stopwatch sw = Stopwatch.createStarted();
String insertSQL = fullSQL(INSERT_SQL);
FileLocation fileLocation = storeRequest.getFileLocation();
// overwrite semantics: delete any existing record before writing
deleteByLocation(fileLocation);
Map<String, Object> meta = Maps.newHashMap();
meta.put("_server_", serverInfo.getIp());
meta.put("_local_file_path_", storeRequest.getLocalFile().getAbsolutePath());
BufferedInputStream bufferedInputStream = new BufferedInputStream(Files.newInputStream(storeRequest.getLocalFile().toPath()));
Date date = new Date(System.currentTimeMillis());
Connection con = null;
PreparedStatement pst = null;
try {
con = dataSource.getConnection();
// PostgreSQL otherwise fails with org.postgresql.util.PSQLException: Large Objects may not be used in auto-commit mode.
con.setAutoCommit(false);
pst = con.prepareStatement(insertSQL);
pst.setString(1, fileLocation.getBucket());
pst.setString(2, fileLocation.getName());
pst.setString(3, "mu");
pst.setString(4, JsonUtils.toJSONString(meta));
pst.setLong(5, storeRequest.getLocalFile().length());
pst.setInt(6, SwitchableStatus.ENABLE.getV());
// PreparedStatement#setBlob cannot populate the BYTEA column directly, because the PostgreSQL driver does not handle it via JDBC's java.sql.Blob:
// pst.setBlob(7, bufferedInputStream); fails with org.postgresql.util.PSQLException: ERROR: column "data" is of type bytea but expression is of type bigint
pst.setBytes(7, bufferedInputStreamToByteArray(bufferedInputStream));
pst.setString(8, null);
pst.setDate(9, date);
pst.setDate(10, date);
pst.execute();
con.commit();
log.info("[PostgresqlSeriesDfsService] store [{}] successfully, cost: {}", fileLocation, sw);
} catch (Exception e) {
if (con != null) {
con.rollback();
}
log.error("[PostgresqlSeriesDfsService] store [{}] failed!", fileLocation, e);
ExceptionUtils.rethrow(e);
} finally {
if (con != null) {
// restore auto-commit mode afterwards
con.setAutoCommit(true);
con.close();
}
if (pst != null) {
pst.close();
}
bufferedInputStream.close();
}
}
/**
* Exceptions are already handled by the caller above, so they are simply propagated here.
* @param bis buffered input stream (may be null)
* @return the stream content as a byte array, or null when bis is null
* @throws IOException on read failure
*/
public static byte[] bufferedInputStreamToByteArray(BufferedInputStream bis) throws IOException {
if (bis == null) {
return null;
}
ByteArrayOutputStream baos = new ByteArrayOutputStream();
// allocate a copy buffer
byte[] buffer = new byte[1024];
int read;
// drain the stream into the ByteArrayOutputStream
while ((read = bis.read(buffer)) != -1) {
baos.write(buffer, 0, read);
}
// close the input stream
bis.close();
// return the collected bytes
return baos.toByteArray();
}
@Override
public void download(DownloadRequest downloadRequest) throws IOException {
Stopwatch sw = Stopwatch.createStarted();
String querySQL = fullSQL(QUERY_FULL_SQL);
FileLocation fileLocation = downloadRequest.getFileLocation();
FileUtils.forceMkdirParent(downloadRequest.getTarget());
try (Connection con = dataSource.getConnection()) {
ResultSet resultSet = con.createStatement().executeQuery(querySQL.concat(whereSQL(fileLocation)));
boolean exist = resultSet.next();
if (!exist) {
log.warn("[PostgresqlSeriesDfsService] download file[{}] failed due to not exits!", fileLocation);
return;
}
// In PostgreSQL, bytea does not map directly to JDBC's Blob type; bytea data should be handled as a byte array (byte[]) rather than a Blob
try {
byte[] dataBytes = resultSet.getBytes("data");
try (FileOutputStream fos = new FileOutputStream(downloadRequest.getTarget())) {
fos.write(dataBytes);
}
} catch (Exception ignore) {
// testing showed this branch can fail with "Bad value for type long", but no user has reported the issue, so the legacy path is kept for now -- retrieval may simply differ across databases
Blob dataBlob = resultSet.getBlob("data");
FileUtils.copyInputStreamToFile(new BufferedInputStream(dataBlob.getBinaryStream()), downloadRequest.getTarget());
}
log.info("[PostgresqlSeriesDfsService] download [{}] successfully, cost: {}", fileLocation, sw);
} catch (Exception e) {
log.error("[PostgresqlSeriesDfsService] download file [{}] failed!", fileLocation, e);
ExceptionUtils.rethrow(e);
}
}
@Override
public Optional<FileMeta> fetchFileMeta(FileLocation fileLocation) {
String querySQL = fullSQL(QUERY_META_SQL);
try (Connection con = dataSource.getConnection()) {
ResultSet resultSet = con.createStatement().executeQuery(querySQL.concat(whereSQL(fileLocation)));
boolean exist = resultSet.next();
if (!exist) {
return Optional.empty();
}
FileMeta fileMeta = new FileMeta()
.setLength(resultSet.getLong("length"))
.setLastModifiedTime(resultSet.getDate("gmt_modified"))
.setMetaInfo(JsonUtils.parseMap(resultSet.getString("meta")));
return Optional.of(fileMeta);
} catch (Exception e) {
log.error("[PostgresqlSeriesDfsService] fetchFileMeta [{}] failed!", fileLocation);
ExceptionUtils.rethrow(e);
}
return Optional.empty();
}
@Override
public void cleanExpiredFiles(String bucket, int days) {
// Although server-side cleanup is offered, users are strongly advised to configure a cleanup event directly at the database level instead!
String dSQLPrefix = fullSQL(DELETE_SQL);
final long targetTs = DateUtils.addDays(new Date(System.currentTimeMillis()), -days).getTime();
final String targetDeleteTime = CommonUtils.formatTime(targetTs);
log.info("[PostgresqlSeriesDfsService] start to cleanExpiredFiles, targetDeleteTime: {}", targetDeleteTime);
String fSQL = dSQLPrefix.concat(String.format(" where gmt_modified < '%s'", targetDeleteTime));
log.info("[PostgresqlSeriesDfsService] cleanExpiredFiles SQL: {}", fSQL);
executeDelete(fSQL);
}
@Override
protected void init(ApplicationContext applicationContext) {
Environment env = applicationContext.getEnvironment();
PostgresqlProperty postgresqlProperty = new PostgresqlProperty()
.setDriver(fetchProperty(env, TYPE_POSTGRESQL, KEY_DRIVER_NAME))
.setUrl(fetchProperty(env, TYPE_POSTGRESQL, KEY_URL))
.setUsername(fetchProperty(env, TYPE_POSTGRESQL, KEY_USERNAME))
.setPassword(fetchProperty(env, TYPE_POSTGRESQL, KEY_PASSWORD))
.setAutoCreateTable(Boolean.TRUE.toString().equalsIgnoreCase(fetchProperty(env, TYPE_POSTGRESQL, KEY_AUTO_CREATE_TABLE)))
;
try {
initDatabase(postgresqlProperty);
initTable(postgresqlProperty);
} catch (Exception e) {
log.error("[PostgresqlSeriesDfsService] init datasource failed!", e);
ExceptionUtils.rethrow(e);
}
log.info("[PostgresqlSeriesDfsService] initialize successfully, THIS_WILL_BE_THE_STORAGE_LAYER.");
}
void initDatabase(PostgresqlProperty property) {
log.info("[PostgresqlSeriesDfsService] init datasource by config: {}", property);
HikariConfig config = new HikariConfig();
config.setDriverClassName(StringUtils.isEmpty(property.driver) ? "org.postgresql.Driver" : property.driver);
config.setJdbcUrl(property.url);
config.setUsername(property.username);
config.setPassword(property.password);
config.setAutoCommit(true);
// minimum number of idle connections in the pool
config.setMinimumIdle(2);
// maximum pool size
config.setMaximumPoolSize(32);
dataSource = new HikariDataSource(config);
}
void initTable(PostgresqlProperty property) throws Exception {
if (property.autoCreateTable) {
String powerjobFilesIdSeq = fullSQL(POWERJOB_FILES_ID_SEQ);
String createTableSQL = fullSQL(CREATE_TABLE_SQL);
log.info("[PostgresqlSeriesDfsService] use create table SQL: {}", createTableSQL);
try (Connection connection = dataSource.getConnection()) {
connection.createStatement().execute(powerjobFilesIdSeq);
connection.createStatement().execute(createTableSQL);
log.info("[PostgresqlSeriesDfsService] auto create table successfully!");
}
}
}
private String fullSQL(String sql) {
return String.format(sql, parseTableName());
}
private String parseTableName() {
// do not remove: keeps local unit tests (which run without a Spring context) working
if (applicationContext == null) {
return DEFAULT_TABLE_NAME;
}
String tableName = fetchProperty(applicationContext.getEnvironment(), TYPE_POSTGRESQL, KEY_TABLE_NAME);
return StringUtils.isEmpty(tableName) ? DEFAULT_TABLE_NAME : tableName;
}
private static String whereSQL(FileLocation fileLocation) {
return String.format(" where bucket='%s' AND name='%s' ", fileLocation.getBucket(), fileLocation.getName());
}
@Override
public void destroy() throws Exception {
}
@Data
@Accessors(chain = true)
static class PostgresqlProperty {
private String driver;
private String url;
private String username;
private String password;
private boolean autoCreateTable;
}
public static class PostgresqlSeriesCondition extends PropertyAndOneBeanCondition {
@Override
protected List<String> anyConfigKey() {
return Lists.newArrayList("oms.storage.dfs.postgresql_series.url");
}
@Override
protected Class<?> beanType() {
return DFsService.class;
}
}
}

View File

@ -0,0 +1,88 @@
package tech.powerjob.server.persistence.storage.impl;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.io.FileUtils;
import org.junit.jupiter.api.Test;
import tech.powerjob.common.serialize.JsonUtils;
import tech.powerjob.server.common.utils.OmsFileUtils;
import tech.powerjob.server.extension.dfs.*;
import java.io.File;
import java.nio.charset.StandardCharsets;
import java.util.Optional;
import java.util.concurrent.ThreadLocalRandom;
/**
* AbstractDfsServiceTest
*
* @author tjq
* @since 2023/7/30
*/
@Slf4j
public abstract class AbstractDfsServiceTest {
private static final String BUCKET = "pj_test";
protected abstract Optional<DFsService> fetchService();
@Test
void testBaseFileOperation() throws Exception {
Optional<DFsService> aliOssServiceOpt = fetchService();
if (!aliOssServiceOpt.isPresent()) {
return;
}
DFsService aliOssService = aliOssServiceOpt.get();
String content = "wlcgyqsl".concat(String.valueOf(ThreadLocalRandom.current().nextLong()));
String temporarySourcePath = OmsFileUtils.genTemporaryWorkPath() + "source.txt";
String temporaryDownloadPath = OmsFileUtils.genTemporaryWorkPath() + "download.txt";
log.info("[testBaseFileOperation] temporarySourcePath: {}", temporarySourcePath);
File sourceFile = new File(temporarySourcePath);
FileUtils.forceMkdirParent(sourceFile);
OmsFileUtils.string2File(content, sourceFile);
FileLocation fileLocation = new FileLocation().setBucket(BUCKET).setName(String.format("test_%d.txt", ThreadLocalRandom.current().nextLong()));
StoreRequest storeRequest = new StoreRequest()
.setFileLocation(fileLocation)
.setLocalFile(sourceFile);
// store
aliOssService.store(storeRequest);
// read the meta
Optional<FileMeta> metaOpt = aliOssService.fetchFileMeta(fileLocation);
assert metaOpt.isPresent();
log.info("[testBaseFileOperation] file meta: {}", JsonUtils.toJSONString(metaOpt.get()));
// download
log.info("[testBaseFileOperation] temporaryDownloadPath: {}", temporaryDownloadPath);
File downloadFile = new File(temporaryDownloadPath);
DownloadRequest downloadRequest = new DownloadRequest()
.setFileLocation(fileLocation)
.setTarget(downloadFile);
aliOssService.download(downloadRequest);
String downloadFileContent = FileUtils.readFileToString(downloadFile, StandardCharsets.UTF_8);
log.info("[testBaseFileOperation] download content: {}", downloadFileContent);
assert downloadFileContent.equals(content);
// scheduled cleanup: execute only, no assertions
aliOssService.cleanExpiredFiles(BUCKET, 3);
}
@Test
void testFileNotExist() throws Exception {
Optional<DFsService> aliOssServiceOpt = fetchService();
if (!aliOssServiceOpt.isPresent()) {
return;
}
Optional<FileMeta> metaOpt = aliOssServiceOpt.get().fetchFileMeta(new FileLocation().setBucket("tjq").setName("yhz"));
assert !metaOpt.isPresent();
}
}

View File

@ -0,0 +1,49 @@
package tech.powerjob.server.persistence.storage.impl;
import com.aliyun.oss.common.utils.AuthUtils;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.exception.ExceptionUtils;
import tech.powerjob.server.extension.dfs.DFsService;
import java.util.Optional;
/**
* test AliOSS
*
* @author tjq
* @since 2023/7/30
*/
@Slf4j
class AliOssServiceTest extends AbstractDfsServiceTest {
private static final String BUCKET = "power-job";
/**
* Depends on Alibaba Cloud credentials; to keep the unit test passing in other environments, it is skipped when the config is absent
* @return AliOssService
*/
@Override
protected Optional<DFsService> fetchService() {
String accessKeyId = StringUtils.trim(System.getenv(AuthUtils.ACCESS_KEY_ENV_VAR));
String secretAccessKey = StringUtils.trim(System.getenv(AuthUtils.SECRET_KEY_ENV_VAR));
String bucket = Optional.ofNullable(System.getenv("POWERJOB_OSS_BUEKCT")).orElse(BUCKET);
log.info("[AliOssServiceTest] ak: {}, sk: {}", accessKeyId, secretAccessKey);
if (StringUtils.isAnyEmpty(accessKeyId, secretAccessKey)) {
return Optional.empty();
}
try {
AliOssService aliOssService = new AliOssService();
aliOssService.initOssClient("oss-cn-beijing.aliyuncs.com", bucket, AliOssService.CredentialType.ENV.getCode(), null, null, null);
return Optional.of(aliOssService);
} catch (Exception e) {
ExceptionUtils.rethrow(e);
}
return Optional.empty();
}
}

View File

@ -0,0 +1,32 @@
package tech.powerjob.server.persistence.storage.impl;
import lombok.extern.slf4j.Slf4j;
import tech.powerjob.server.common.utils.TestUtils;
import tech.powerjob.server.extension.dfs.DFsService;
import java.util.Optional;
/**
* test GridFS
*
* @author tjq
* @since 2023/7/30
*/
@Slf4j
class GridFsServiceTest extends AbstractDfsServiceTest {
@Override
protected Optional<DFsService> fetchService() {
Object mongoUri = TestUtils.fetchTestConfig().get(TestUtils.KEY_MONGO_URI);
if (mongoUri == null) {
log.info("[GridFsServiceTest] mongoUri is null, skip load!");
return Optional.empty();
}
GridFsService gridFsService = new GridFsService();
gridFsService.initMongo(String.valueOf(mongoUri));
return Optional.of(gridFsService);
}
}

View File

@ -0,0 +1,30 @@
package tech.powerjob.server.persistence.storage.impl;
import lombok.extern.slf4j.Slf4j;
import tech.powerjob.server.extension.dfs.DFsService;
import java.util.Optional;
/**
* MinioOssServiceTest
* The test needs a locally deployed MinIO first, so exceptions are caught and a failure does not block the test run
*
* @author tjq
* @since 2024/2/26
*/
@Slf4j
class MinioOssServiceTest extends AbstractDfsServiceTest {
@Override
protected Optional<DFsService> fetchService() {
try {
MinioOssService minioOssService = new MinioOssService();
minioOssService.initOssClient("http://192.168.124.23:9000", "pj2", "testAk", "testSktestSktestSk");
return Optional.of(minioOssService);
} catch (Exception e) {
// log the exception only
log.error("[MinioOssServiceTest] test exception!", e);
}
return Optional.empty();
}
}

View File

@ -0,0 +1,42 @@
package tech.powerjob.server.persistence.storage.impl;
import tech.powerjob.common.utils.NetUtils;
import tech.powerjob.server.extension.dfs.DFsService;
import java.util.Optional;
/**
* MySqlSeriesDfsServiceTest
*
* @author tjq
* @since 2023/8/10
*/
class MySqlSeriesDfsServiceTest extends AbstractDfsServiceTest {
@Override
protected Optional<DFsService> fetchService() {
boolean dbAvailable = NetUtils.checkIpPortAvailable("127.0.0.1", 3306);
if (dbAvailable) {
MySqlSeriesDfsService mySqlSeriesDfsService = new MySqlSeriesDfsService();
try {
MySqlSeriesDfsService.MySQLProperty mySQLProperty = new MySqlSeriesDfsService.MySQLProperty()
.setDriver("com.mysql.cj.jdbc.Driver")
.setUrl("jdbc:mysql://localhost:3306/powerjob-daily?useUnicode=true&characterEncoding=UTF-8&serverTimezone=Asia/Shanghai")
.setUsername("root")
.setAutoCreateTable(true)
.setPassword("No1Bug2Please3!");
mySqlSeriesDfsService.initDatabase(mySQLProperty);
mySqlSeriesDfsService.initTable(mySQLProperty);
return Optional.of(mySqlSeriesDfsService);
} catch (Exception e) {
e.printStackTrace();
}
}
return Optional.empty();
}
}