Detailed Statistical Analysis
Gaining an edge in understanding player performance requires thorough statistical analysis. Our platform provides comprehensive stats, including goals scored per game, assists per matchday, and average shots taken by top shooters, among other vital metrics that inform strategic decisions both within teams' management circles and for individual bettors' choices!
- GPA (Goals Per Assist): Analyze which players contribute most effectively towards scoring opportunities across different teams!
# xiaochengxu-demo
小程序demo
<|file_sep|>//index.js
//获取应用实例
const app = getApp()
var util = require('../../utils/util.js');
Page({
  data: {
    // Movies currently in theaters; filled asynchronously by onLoad.
    movieList: []
  },
  // Tap handler bound from the WXML (currently a no-op).
  bindViewTap: function() {
  },
  /**
   * Page lifecycle: load the "in theaters" movie list from the Douban
   * API and store it in `data.movieList` for the view to render.
   */
  onLoad: function () {
    var self = this;
    wx.request({
      url: 'https://api.douban.com/v2/movie/in_theaters',
      data:{
        count:10,
        start:0,
        city:'北京',
        'apikey':'0b2bdeda43b5688921839c8ecb20399b'
      },
      header:{
        'content-type':'application/json'
      },
      method:'GET',
      success:function(res){
        console.log(res);
        self.setData({
          // Guard against an unexpected response shape so setData never
          // stores `undefined`.
          movieList : (res.data && res.data.subjects) || []
        })
      },
      fail:function(err){
        // Surface network failures instead of swallowing them silently.
        console.error('Failed to load in-theater movies:', err);
      }
    });
  }
})
<|file_sep|>// pages/detail/detail.js
var util = require('../../utils/util.js');
Page({
  /**
   * Initial page data; `detailInfo` is filled asynchronously by onLoad.
   */
  data: {
  },
  /**
   * Lifecycle: page load. Fetches the movie detail for `options.id`
   * from the Douban API and stores it in `data.detailInfo`.
   */
  onLoad: function (options) {
    var self = this;
    wx.request({
      url: 'https://api.douban.com/v2/movie/subject/' + options.id,
      data:{
        'apikey':'0b2bdeda43b5688921839c8ecb20399b'
      },
      header:{
        'content-type':'application/json'
      },
      method:'GET',
      success:function(res){
        console.log(res);
        var movieDetail = res.data;
        movieDetail.cover = util.imgPath(movieDetail.images.large);
        movieDetail.year = movieDetail.year || '';
        // Guard: `rating` may be missing from the response, in which
        // case reading `.average` directly would throw.
        movieDetail.rating = movieDetail.rating || {};
        movieDetail.rating.average = movieDetail.rating.average || '';
        self.setData({
          detailInfo : movieDetail
        })
      },
      fail:function(err){
        // Surface network failures instead of swallowing them silently.
        console.error('Failed to load movie detail:', err);
      }
    });
  },
  /**
   * Lifecycle: first render complete.
   */
  onReady: function () {
  },
  /**
   * Lifecycle: page shown.
   */
  onShow: function () {
  },
  /**
   * Lifecycle: page hidden.
   */
  onHide: function () {
  },
  /**
   * Lifecycle: page unloaded.
   */
  onUnload: function () {
  },
  /**
   * Event: user pull-down refresh.
   */
  onPullDownRefresh: function () {
  },
  /**
   * Event: scrolled to the bottom of the page.
   */
  onReachBottom: function () {
  },
})
//获取应用实例
var util = require('../../utils/util.js');
const app = getApp()
// NOTE(review): the original block was a syntax error — bare statements
// (`const app = getApp()`) and a second nested `Page({` appeared inside
// the options object literal. Replaced with a minimal valid page
// registration.
Page({
  data: {},
})
<|repo_name|>davidchuanjun/xiaochengxu-demo<|file_sep|>/pages/tabbar/index/index.js
//index.js
//获取应用实例
const app = getApp()
// NOTE(review): the original block was a syntax error — bare statements
// (`const app = getApp()`) and a second nested `Page({` appeared inside
// the options object literal. Replaced with a minimal valid page
// registration.
Page({
  data: {},
})
<|file_sep|># -*- coding:utf-8 -*-
import os
import numpy as np
from dataloader import *
class SQADataLoader(Dataset):
def __init__(self,
             sqa_path,
             vocab_path,
             max_seq_length=100,
             max_ques_length=30,
             max_ans_length=30,
             batch_size=32,
             train=True):
    """Set up the SQA data loader.

    Records the configuration, loads the vocabulary from ``vocab_path``,
    and eagerly reads either the training or the test split from
    ``sqa_path`` depending on ``train``.
    """
    # Configuration.
    self.sqa_path = sqa_path
    self.vocab_path = vocab_path
    self.max_seq_length = max_seq_length
    self.max_ques_length = max_ques_length
    self.max_ans_length = max_ans_length
    self.batch_size = batch_size
    self.train = train

    # Vocabulary lookup tables; the third return value is unused here.
    self.vocab, self.rev_vocab, _ = load_vocab(vocab_path)

    if train:
        (self.train_file_list, self.train_index_list, self.train_data_num,
         self.train_label_list, self.train_mask_list,
         self.train_label_mask_list, self.train_ans_list,
         self.train_ans_mask_list, self.train_ques_list,
         self.train_ques_mask_list, self.train_seq_list,
         self.train_seq_mask_list) = self.load_train_data(sqa_path)
        # Cursor into the training set for batch iteration.
        self.pointer = 0
        print("Train Data Load Finish!")
    else:
        (self.test_file_list, self.test_index_list, self.test_data_num,
         self.test_label_list, self.test_mask_list,
         self.test_label_mask_list) = self.load_test_data(sqa_path)
        print("Test Data Load Finish!")
def load_train_data(self,sqa_path):
file_lists=[]
for rootdir , subdirs , filenames in os.walk(sqa_path):
for filename in filenames:
if filename.endswith('.txt'):
file_lists.append(os.path.join(rootdir,filename))
data_num=len(file_lists)
label_list=np.zeros([data_num])
mask_list=np.zeros([data_num])
label_mask_list=np.zeros([data_num])
ans_list=np.zeros([data_num])
ans_mask_list=np.zeros([data_num])
ques_list=np.zeros([data_num])
ques_mask_list=np.zeros([data_num])
seq_list=np.zeros([data_num])
seq_mask_list=np.zeros([data_num])
index_list=[]
for i , file_name in enumerate(file_lists):
with open(file_name,"r",encoding='utf-8') as f:
lines=f.readlines()
for j , line in enumerate(lines):
line=line.strip("n")
split_line=line.split(" ")
seq=[]
mask=[]
label=-1
label_mask=0
ans=-1
ans_mask=0
seq_len=0
mask_len=0
for k , token in enumerate(split_line):
if token.startswith("label"):
label=int(token.split(":")[1])
label_mask=1
elif token.startswith("seq"):
token=token.split(":")[1]
if seq_len==0:
seq=[self.vocab["
sentence"]]+[self.vocab.get(token,-1)] mask=[1]+[1] seq_len+=2 mask_len+=2 elif seq_len<=self.max_seq_length-1: seq.append(self.vocab.get(token,-1)) mask.append(1) seq_len+=1 mask_len+=1 elif token.startswith("ques"): token=token.split(":")[1] if j==0: if ques_len==0: ques=[self.vocab[" sentence"]]+[self.vocab.get(token,-1)] ques_mask=[1]+[1] ques_len+=2 mask_len+=2 elif ques_len<=self.max_ques_length-1: ques.append(self.vocab.get(token,-1)) ques_mask.append(1) ques_len+=1 mask_len+=1 elif token.startswith("ans"): token=token.split(":")[1] if j==len(lines)-1: if ans_len==0: ans=[self.vocab[" sentence"]]+[self.vocab.get(token,-1)] ans_mask=[1]+[1] ans_len+=2 mask_len+=2 elif ans_len<=self.max_ans_length-1: ans.append(self.vocab.get(token,-1)) ans_mask.append(1) ans_len+=1 mask_len+=1 if seq_len==0 or mask_len==0: continue while seq_len!=mask_len: seq.pop() seq.append(-100) seq_mask.pop() seq_mask.append(0) while len(seq)!=self.max_seq_length: seq.append(-100) seq_mask.append(0) while len(mask)!=self.max_seq_length: mask.append(0) while len(seq)!=self.max_seq_length or len(mask)!=self.max_seq_length or len(seq_mask)!=self.max_seq_length or label==-1 or label_mask==0 : print(file_name+" error !") exit() label=label.tolist() mask=mask.tolist() seq=seq.tolist() seq_mask=seq_mask.tolist() label_mask=label_mask.tolist() index_list.append(i) label_list[i]=label mask_list[i]=mask label_mask_list[i]=label_mask ans[i]=ans ans_mask[i]=ans_mask ques[i]=ques ques_mask[i]=ques_mask seq[i]=seq seq_mask[i]=seq_mask return file_lists,index_list,data_num,label_list,mask