django—将服务器上的文件保存至本地以及本地文件上传

django—将服务器上的文件保存至本地以及本地文件上传。用户下载线上文件有两种情况:一种是用户申请、页面跑完数据后文件直接下载下来;另一种是用户需要点击特定的按钮去下载这个文件。整体来说这两种情况大同小异,看具体需求。首先不管是哪种需求,都要先在项目的配置文件 settings.py 中设置静态文件路径(STATIC_URL、STATICFILES_DIRS)。

首先不管是哪种需求,都要先在项目的配置文件settings.py 去设置静态文件路径

# settings.py — static file paths (required for both download modes).
STATIC_URL = '/static/'
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, 'static'),
]
# Back end: the user clicks a download button to fetch the file.
# Resolve the download directory under the project's static root.
DOWN_ROOT = os.path.join(settings.BASE_DIR, 'static/download').replace("//", "/")
# Compose the output file name: data<date>_<user>_<time>.
# NOTE(review): handledate / userName / userTime / datamerge are defined
# elsewhere (not visible in this snippet) — confirm before reuse.
fileLocation = DOWN_ROOT + '/data' + handledate.split(' ')[0]+'_'+userName+'_'+userTime
# Final Excel output path.
filename = fileLocation + '.xlsx'
# Dump the merged DataFrame to disk as the downloadable file.
datamerge.to_excel(filename, encoding='utf-8')
 def downloadData(request): test = request.POST.get('test', '') #从前端拿到用户要下载的文件名 fileName=request.POST.get("fileName") DOWN_ROOT = os.path.join(settings.BASE_DIR, 'static/download').replace("//", "/") if test == '': fileLocation = DOWN_ROOT elif int(test) == 2: fileLocation = DOWN_ROOT + '/marketTrend' elif int(test) == 3: fileLocation = DOWN_ROOT + '/consumer' # file=open(fileLocation+'/'+fileName,'rb') #这一块的代码就是块状读取文件并输出 def file_iterator(file_name, chunk_size=512): with open(file_name,'rb') as f: while True: c = f.read(chunk_size) if c: yield c else: break the_file_name = fileLocation+'/'+fileName response = StreamingHttpResponse(file_iterator(the_file_name)) response['Content-Type']='application/vnd.ms-excel' response['Content-Disposition'] = 'attachment;filename="{0}"'.format(the_file_name) print(fileName) return response 

用户下载好的文件直接一步down到本地,我这里举例的是down表格

def marketPattern_handle(request):
    """Build an .xls workbook from responseFour['data'] and return it
    as an attachment so the file downloads in one step.

    NOTE(review): responseFour / ontTagInfo / oneId / mydate come from an
    enclosing scope not visible in this snippet — confirm before reuse.
    """
    # Timestamp-based file name keeps downloads unique enough per request.
    file_names = str(time.time()) + '.xls'
    # Create the workbook and a single sheet.
    wb_brand = xlwt.Workbook(encoding='utf-8')
    sheet1 = wb_brand.add_sheet('UV')
    # Header row (row 0) — equivalent to pd.DataFrame(columns=[...]).
    headers = [
        u'包名', u'时间', u'TOP品牌', u'销售金额指数', u'销量',
        u'平均价格指数', u'购买人数(指数)', u'购买频次',
        u'单次购买量(指数)', u'搜索人数(指数)', u'搜索人数转化率',
    ]
    for col, title in enumerate(headers):
        sheet1.write(0, col, title)
    # Item keys for columns 2..10, in header order.
    item_keys = [
        'brandName', 'salesAmount', 'payItmQty', 'payAvgPrice', 'payCnt',
        'payFrequency', 'perPayCnt', 'searchCnt', 'searchPayPercent',
    ]
    # Data rows start at row 1.
    excel_row_brand = 1
    for item in responseFour['data']:
        sheet1.write(excel_row_brand, 0, ontTagInfo[oneId])
        sheet1.write(excel_row_brand, 1, mydate)
        for offset, key in enumerate(item_keys, start=2):
            sheet1.write(excel_row_brand, offset, item[key])
        excel_row_brand += 1
    # Serialize the workbook into memory and attach it to the response.
    output = BytesIO()
    wb_brand.save(output)
    output.seek(0)
    response = HttpResponse(content_type='application/vnd.ms-excel')
    response['Content-Disposition'] = 'attachment;filename=' + file_names
    response.write(output.getvalue())
    # BUG FIX: the original never returned the response, so Django received
    # None and the browser never got the file.
    return response
// Front end — the relevant jQuery part: upload the file and handle the result.
// BUG FIX: the original used Python-style '#' comments, which are invalid
// JavaScript syntax; normalized to '//'.
$("#file-1").fileinput({
    uploadUrl: '/xgb/upload_csv_save',
    // Restrict the accepted upload formats.
    allowedFileExtensions: ['csv', 'xlsx', 'xls'],
    overwriteInitial: false,
    maxFileSize: 10240,
    maxFilesNum: 10,
    language: 'zh',
    allowedPreviewTypes: ['image'],
    showPreview: false,
    //allowedFileTypes: ['image', 'video', 'flash'],
    slugCallback: function (filename) {
        return filename.replace('(', '_').replace(']', '_');
    },
    // Extra form fields posted alongside the file.
    uploadExtraData: function (previewId, index) {
        var obj = {};
        obj.data_type = $('#data_source option:selected').val();
        obj.fileName = $('input[name="myexcel"]').val();
        return obj;
    }
});
// Handle the JSON result returned by the back end after the upload finishes.
$("#file-1").on("fileuploaded", function (event, data, previewId, index) {
    if (data.response.state == 'success') {
        swal(data.response.code);
    } else {
        swal(data.response.errorRows.toString());
    }
});

#后端代码主要就是负责将用户上传的文件保存在服务器。这里我大概说一下,因为针对文件保存模式,大家可以根据自己的需求,我这里呢主要是需要将前端上传的表格存到数据库。

def upload_xls_save(request):
    """Validate an uploaded Excel file and upsert its rows into HairCate.

    Only members of the '管理员' (admin) group may upload; others are
    redirected to '/'. The file is saved under <project>/upload, parsed with
    pandas, validated (required columns present, Promotion_Type whitelist,
    no missing feature values), then written row by row: existing dates are
    updated, new dates inserted.

    Returns:
        JSON response with 'status'/'msg' ('errinfo' lists dates that failed
        to persist while the rest succeeded), or a redirect for non-admins.
    """
    current_user_groups_list = list(
        request.user.groups.values_list('name', flat=True))
    if '管理员' not in current_user_groups_list:
        return HttpResponseRedirect('/')

    response_data = {}
    upload_root = os.path.join(
        os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'upload')

    if request.method == 'POST':
        file_obj = request.FILES.get('xls_data_file', None)
        # Prefix with a timestamp so repeated uploads never collide.
        file_name = ('history_' + datetime.now().strftime("%Y%m%d%H%M%S")
                     + '_' + file_obj.name)
        file_full_path = os.path.join(upload_root, file_name)
        # BUG FIX: use a context manager so the handle is closed even if the
        # write raises (the original leaked it on errors).
        with open(file_full_path, 'wb+') as dest:
            dest.write(file_obj.read())
        # BUG FIX: dropped the cchardet/encoding branch. The original re-read
        # request.FILES after the stream was already consumed (so detection
        # always saw b''), and pd.read_excel accepts no 'encoding' argument —
        # xls/xlsx are binary formats with their own internal encoding.
        df = pd.read_excel(file_full_path)

        # Every column the sheet must contain (model fields + 'Date').
        columns = [
            'Date',
            'Haircare_RTB_Banner_Spending', 'Haircare_RTB_Search_Spending',
            'Haircolor_RTB_Banner_Spending', 'Haircolor_RTB_Search_Spending',
            'Styling_RTB_Search_Spending',
            'GMV_Haircare_PI', 'GMV_Haircolor_PI', 'GMV_Styling_PI',
            'GMV_TTL_PI', 'TTL_PLV', 'Promotion_Type', 'GMV_TTL',
            'Oil_Golden_TTL', 'Oil_Golden_Single', 'Golden_Red_Set',
            'Oil_Red_TTL', 'Oil_Red_Single', 'Red_Brown_Set',
            'AN_Shampoo_700_TTL', 'AN_Shampoo_700_Single',
            'AN_Shampoo_500_TTL', 'AN_Shampoo_500_Single',
            'AN_Conditioner_500_TTL', 'AN_Conditioner_500_Single',
            'AN_500_Set',
            'HM_Shampoo_700_TTL', 'HM_Shampoo_700_Single', 'HM_Set',
            'HM_Shampoo_500_TTL', 'HM_Shampoo_500_Single', 'HM_500_Set',
            'Excellence_Cream_1_TTL', 'Excellence_Cream_3_TTL',
            'Clay_TTL', 'Spray_TTL',
        ]
        # Feature columns used for prediction — these may not contain NaN.
        features_columns = [
            'Date',
            'Haircare_RTB_Banner_Spending', 'Haircare_RTB_Search_Spending',
            'Haircolor_RTB_Banner_Spending', 'Haircolor_RTB_Search_Spending',
            'Styling_RTB_Search_Spending',
            'GMV_Haircare_PI', 'GMV_Haircolor_PI', 'GMV_Styling_PI',
            'GMV_TTL_PI', 'TTL_PLV', 'Promotion_Type',
        ]

        # Reject the upload if any required column is missing.
        diffs = list(set(columns).difference(set(df.columns)))
        if len(diffs) > 0:
            response_data['status'] = 'error'
            response_data['msg'] = ','.join(diffs) + '列缺失,上传失败!'
            return HttpResponse(json.dumps(response_data),
                                content_type="application/json")

        data_df = df[columns]
        data_df['Promotion_Type'] = data_df['Promotion_Type'].astype('str')
        # Whitelist of recognized promotion types.
        promotion_types = ['双十一', '618', '三八节', '超品日', '年货节',
                           '520', '618预热', '99', '大牌风暴', '双十二',
                           'Brand-Promo', '满减', 'Co-Promo', '日常', '其他促销']
        bad_promotions = data_df.loc[
            ~data_df['Promotion_Type'].isin(promotion_types), 'Date']
        # BUG FIX: '.empty is False' compared identity of a bool; use 'not'.
        if not bad_promotions.empty:
            drop_datas = [str(x).replace(' 00:00:00', ' ')
                          for x in bad_promotions.tolist()]
            response_data['status'] = 'error'
            response_data['msg'] = ','.join(drop_datas) + '的Promotion_Type异常!'
            return HttpResponse(json.dumps(response_data),
                                content_type="application/json")

        # Reject rows whose feature columns contain NaN.
        remaining_data = data_df[features_columns].dropna(axis=0, how='any')
        drop_data = list(
            set(data_df['Date']).difference(set(remaining_data['Date'])))
        if len(drop_data) > 0:
            drop_datas = [str(x).replace(' 00:00:00', ' ') for x in drop_data]
            response_data['msg'] = ','.join(drop_datas) + '数据缺失或上传失败!'
            response_data['status'] = 'error'
            return HttpResponse(json.dumps(response_data),
                                content_type="application/json")

        # NaN in the target columns means out-of-stock etc. — store as -1.
        remaining_data = data_df.fillna(-1)
        drop_data = []
        for item in remaining_data.to_dict('records'):
            # Model field names match the column names; the Excel 'Date'
            # column maps to the model's 'Dates' field.
            field_values = {k: item[k] for k in columns if k != 'Date'}
            try:
                HairCate.objects.get(Dates=item['Date'])
                update_result = HairCate.objects.filter(
                    Dates=item['Date']).update(**field_values)
                # BUG FIX: 'is 0' tested object identity, not value equality.
                if update_result == 0:
                    drop_data.append(item['Date'])
            except HairCate.DoesNotExist:
                try:
                    HairCate(Dates=item['Date'], **field_values).save()
                except Exception:
                    drop_data.append(item['Date'])
        if len(drop_data) > 0:
            drop_datas = [str(x).replace(' 00:00:00', ' ') for x in drop_data]
            # Partial failure: report the bad dates but still answer
            # 'success' for the rows that persisted (original behavior kept).
            response_data['errinfo'] = ','.join(drop_datas) + '数据异常,上传失败!'
        response_data['status'] = 'success'
        response_data['msg'] = '上传成功'
    else:
        response_data['status'] = 'error'
        response_data['msg'] = '只允许post上传'
    return HttpResponse(json.dumps(response_data),
                        content_type="application/json")
版权声明:本文内容由互联网用户自发贡献,该文观点仅代表作者本人。本站仅提供信息存储空间服务,不拥有所有权,不承担相关法律责任。如发现本站有涉嫌侵权/违法违规的内容, 请联系我们举报,一经查实,本站将立刻删除。

发布者:全栈程序员-站长,转载请注明出处:https://javaforall.net/224173.html原文链接:https://javaforall.net

(0)
上一篇 2026年3月17日 下午12:32
下一篇 2026年3月17日 下午12:33


相关推荐

发表回复

您的邮箱地址不会被公开。 必填项已用 * 标注

关注全栈程序员社区公众号